+ xReturn = pdPASS;\r
+ }\r
+ else\r
+ {\r
+ xReturn = pdFAIL;\r
+ }\r
+ }\r
+ #else\r
+ {\r
+ /* The Idle task is being created using dynamically allocated RAM. */\r
+ xReturn = xTaskCreate( prvIdleTask,\r
+ configIDLE_TASK_NAME,\r
+ configMINIMAL_STACK_SIZE,\r
+ ( void * ) NULL,\r
+ portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */\r
+ &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */\r
+ }\r
+ #endif /* configSUPPORT_STATIC_ALLOCATION */\r
+\r
+ #if ( configUSE_TIMERS == 1 )\r
+ {\r
+ if( xReturn == pdPASS )\r
+ {\r
+ xReturn = xTimerCreateTimerTask();\r
+ }\r
+ else\r
+ {\r
+ mtCOVERAGE_TEST_MARKER();\r
+ }\r
+ }\r
+ #endif /* configUSE_TIMERS */\r
+\r
+ if( xReturn == pdPASS )\r
+ {\r
+ /* freertos_tasks_c_additions_init() should only be called if the user\r
+ definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is\r
+ the only macro called by the function. */\r
+ #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT\r
+ {\r
+ freertos_tasks_c_additions_init();\r
+ }\r
+ #endif\r
+\r
+ /* Interrupts are turned off here, to ensure a tick does not occur\r
+ before or during the call to xPortStartScheduler(). The stacks of\r
+ the created tasks contain a status word with interrupts switched on\r
+ so interrupts will automatically get re-enabled when the first task\r
+ starts to run. */\r
+ portDISABLE_INTERRUPTS();\r
+\r
+ #if ( configUSE_NEWLIB_REENTRANT == 1 )\r
+ {\r
+ /* Switch Newlib's _impure_ptr variable to point to the _reent\r
+ structure specific to the task that will run first. */\r
+ _impure_ptr = &( pxCurrentTCB->xNewLib_reent );\r
+ }\r
+ #endif /* configUSE_NEWLIB_REENTRANT */\r
+\r
+ xNextTaskUnblockTime = portMAX_DELAY;\r
+ xSchedulerRunning = pdTRUE;\r
+ xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;\r
+\r
+ /* If configGENERATE_RUN_TIME_STATS is defined then the following\r
+ macro must be defined to configure the timer/counter used to generate\r
+ the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS\r
+ is set to 0 and the following line fails to build then ensure you do not\r
+ have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your\r
+ FreeRTOSConfig.h file. */\r
+ portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();\r
+\r
+ traceTASK_SWITCHED_IN();\r
+\r
+ /* Setting up the timer tick is hardware specific and thus in the\r
+ portable interface. */\r
+ if( xPortStartScheduler() != pdFALSE )\r
+ {\r
+ /* Should not reach here as if the scheduler is running the\r
+ function will not return. */\r
+ }\r
+ else\r
+ {\r
+ /* Should only reach here if a task calls xTaskEndScheduler(). */\r
+ }\r
+ }\r
+ else\r
+ {\r
+ /* This line will only be reached if the kernel could not be started,\r
+ because there was not enough FreeRTOS heap to create the idle task\r
+ or the timer task. */\r
+ configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );\r
+ }\r
+\r
+ /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,\r
+ meaning xIdleTaskHandle is not used anywhere else. */\r
+ ( void ) xIdleTaskHandle;\r
+}\r
+/*-----------------------------------------------------------*/\r
+\r
+/* Stop the scheduler: disable the tick interrupt, mark the scheduler as
+not running, then let the port layer undo any hardware setup it made. */
+void vTaskEndScheduler( void )
+{
+ /* Stop the scheduler interrupts and call the portable scheduler end
+ routine so the original ISRs can be restored if necessary. The port
+ layer must ensure interrupts enable bit is left in the correct state. */
+ portDISABLE_INTERRUPTS();
+ xSchedulerRunning = pdFALSE;
+ vPortEndScheduler();
+}
+/*----------------------------------------------------------*/\r
+\r
+/* Suspend the scheduler. Calls nest: uxSchedulerSuspended is a counter,
+so each call must eventually be balanced by a call to xTaskResumeAll(). */
+void vTaskSuspendAll( void )
+{
+ /* A critical section is not required as the variable is of type
+ UBaseType_t. Please read Richard Barry's reply in the following link to a
+ post in the FreeRTOS support forum before reporting this as a bug! -
+ http://goo.gl/wu4acr */
+ ++uxSchedulerSuspended;
+ portMEMORY_BARRIER();
+}
+/*----------------------------------------------------------*/\r
+\r
+#if ( configUSE_TICKLESS_IDLE != 0 )
+
+ /* Return the number of tick periods before a task is next due to leave
+ the Blocked state, or 0 if the idle task should not enter a low power
+ state because other tasks can run: other Ready tasks at the idle
+ priority, or - when preemption is off - Ready tasks above the idle
+ priority. */
+ static TickType_t prvGetExpectedIdleTime( void )
+ {
+ TickType_t xReturn;
+ UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
+
+ /* uxHigherPriorityReadyTasks takes care of the case where
+ configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
+ task that are in the Ready state, even though the idle task is
+ running. */
+ #if( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
+ {
+ if( uxTopReadyPriority > tskIDLE_PRIORITY )
+ {
+ uxHigherPriorityReadyTasks = pdTRUE;
+ }
+ }
+ #else
+ {
+ const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;
+
+ /* When port optimised task selection is used the uxTopReadyPriority
+ variable is used as a bit map. If bits other than the least
+ significant bit are set then there are tasks that have a priority
+ above the idle priority that are in the Ready state. This takes
+ care of the case where the co-operative scheduler is in use. */
+ if( uxTopReadyPriority > uxLeastSignificantBit )
+ {
+ uxHigherPriorityReadyTasks = pdTRUE;
+ }
+ }
+ #endif
+
+ if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY )
+ {
+ xReturn = 0;
+ }
+ else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 )
+ {
+ /* There are other idle priority tasks in the ready state. If
+ time slicing is used then the very next tick interrupt must be
+ processed. */
+ xReturn = 0;
+ }
+ else if( uxHigherPriorityReadyTasks != pdFALSE )
+ {
+ /* There are tasks in the Ready state that have a priority above the
+ idle priority. This path can only be reached if
+ configUSE_PREEMPTION is 0. */
+ xReturn = 0;
+ }
+ else
+ {
+ xReturn = xNextTaskUnblockTime - xTickCount;
+ }
+
+ return xReturn;
+ }
+
+#endif /* configUSE_TICKLESS_IDLE */
+/*----------------------------------------------------------*/\r
+\r
+/* Resume the scheduler after a matching call to vTaskSuspendAll(). Any
+tasks readied by interrupts while the scheduler was suspended are moved
+from xPendingReadyList onto the ready lists, any ticks pended during the
+suspension are processed, and a yield is performed if one became due.
+Returns pdTRUE if this call performed a yield, otherwise pdFALSE. */
+BaseType_t xTaskResumeAll( void )
+{
+TCB_t *pxTCB = NULL;
+BaseType_t xAlreadyYielded = pdFALSE;
+
+ /* If uxSchedulerSuspended is zero then this function does not match a
+ previous call to vTaskSuspendAll(). */
+ configASSERT( uxSchedulerSuspended );
+
+ /* It is possible that an ISR caused a task to be removed from an event
+ list while the scheduler was suspended. If this was the case then the
+ removed task will have been added to the xPendingReadyList. Once the
+ scheduler has been resumed it is safe to move all the pending ready
+ tasks from this list into their appropriate ready list. */
+ taskENTER_CRITICAL();
+ {
+ --uxSchedulerSuspended;
+
+ if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+ {
+ if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
+ {
+ /* Move any readied tasks from the pending list into the
+ appropriate ready list. */
+ while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
+ {
+ pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
+ ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
+ ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
+ prvAddTaskToReadyList( pxTCB );
+
+ /* If the moved task has a priority equal to or higher than
+ the current task then a yield must be performed. */
+ if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
+ {
+ xYieldPending = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+
+ if( pxTCB != NULL )
+ {
+ /* A task was unblocked while the scheduler was suspended,
+ which may have prevented the next unblock time from being
+ re-calculated, in which case re-calculate it now. Mainly
+ important for low power tickless implementations, where
+ this can prevent an unnecessary exit from low power
+ state. */
+ prvResetNextTaskUnblockTime();
+ }
+
+ /* If any ticks occurred while the scheduler was suspended then
+ they should be processed now. This ensures the tick count does
+ not slip, and that any delayed tasks are resumed at the correct
+ time. */
+ {
+ UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. */
+
+ if( uxPendedCounts > ( UBaseType_t ) 0U )
+ {
+ do
+ {
+ if( xTaskIncrementTick() != pdFALSE )
+ {
+ xYieldPending = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ --uxPendedCounts;
+ } while( uxPendedCounts > ( UBaseType_t ) 0U );
+
+ uxPendedTicks = 0;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+
+ if( xYieldPending != pdFALSE )
+ {
+ #if( configUSE_PREEMPTION != 0 )
+ {
+ xAlreadyYielded = pdTRUE;
+ }
+ #endif
+ taskYIELD_IF_USING_PREEMPTION();
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ taskEXIT_CRITICAL();
+
+ return xAlreadyYielded;
+}
+/*-----------------------------------------------------------*/\r
+\r
+/* Return the current tick count. Not for use from interrupts - use
+xTaskGetTickCountFromISR() there instead. */
+TickType_t xTaskGetTickCount( void )
+{
+TickType_t xTicks;
+
+ /* Critical section required if running on a 16 bit processor. */
+ portTICK_TYPE_ENTER_CRITICAL();
+ {
+ xTicks = xTickCount;
+ }
+ portTICK_TYPE_EXIT_CRITICAL();
+
+ return xTicks;
+}
+/*-----------------------------------------------------------*/\r
+\r
+/* Version of xTaskGetTickCount() that is safe to call from an ISR - the
+read is protected with an interrupt mask rather than a critical section. */
+TickType_t xTaskGetTickCountFromISR( void )
+{
+TickType_t xReturn;
+UBaseType_t uxSavedInterruptStatus;
+
+ /* RTOS ports that support interrupt nesting have the concept of a maximum
+ system call (or maximum API call) interrupt priority. Interrupts that are
+ above the maximum system call priority are kept permanently enabled, even
+ when the RTOS kernel is in a critical section, but cannot make any calls to
+ FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
+ then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+ failure if a FreeRTOS API function is called from an interrupt that has been
+ assigned a priority above the configured maximum system call priority.
+ Only FreeRTOS functions that end in FromISR can be called from interrupts
+ that have been assigned a priority at or (logically) below the maximum
+ system call interrupt priority. FreeRTOS maintains a separate interrupt
+ safe API to ensure interrupt entry is as fast and as simple as possible.
+ More information (albeit Cortex-M specific) is provided on the following
+ link: https://www.freertos.org/RTOS-Cortex-M3-M4.html */
+ portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+ uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
+ {
+ xReturn = xTickCount;
+ }
+ portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+
+ return xReturn;
+}
+/*-----------------------------------------------------------*/\r
+\r
+/* Return the number of tasks the kernel is currently managing - the
+current value of uxCurrentNumberOfTasks. */
+UBaseType_t uxTaskGetNumberOfTasks( void )
+{
+ /* A critical section is not required because the variable is of type
+ UBaseType_t. */
+ return uxCurrentNumberOfTasks;
+}
+/*-----------------------------------------------------------*/\r
+\r
+/* Return a pointer to the name of the task referenced by xTaskToQuery,
+or the name of the calling task if xTaskToQuery is NULL. */
+char *pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+{
+TCB_t *pxTCB;
+
+ /* If null is passed in here then the name of the calling task is being
+ queried. */
+ pxTCB = prvGetTCBFromHandle( xTaskToQuery );
+ configASSERT( pxTCB );
+ return &( pxTCB->pcTaskName[ 0 ] );
+}
+/*-----------------------------------------------------------*/\r
+\r
+#if ( INCLUDE_xTaskGetHandle == 1 )
+
+ /* Walk a single task list looking for a task whose name exactly matches
+ pcNameToQuery. Returns the matching TCB, or NULL if no task on the list
+ has that name. */
+ static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] )
+ {
+ TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL;
+ UBaseType_t x;
+ char cNextChar;
+ BaseType_t xBreakLoop;
+
+ /* This function is called with the scheduler suspended. */
+
+ if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
+ {
+ listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
+
+ do
+ {
+ listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
+
+ /* Check each character in the name looking for a match or
+ mismatch. */
+ xBreakLoop = pdFALSE;
+ for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
+ {
+ cNextChar = pxNextTCB->pcTaskName[ x ];
+
+ if( cNextChar != pcNameToQuery[ x ] )
+ {
+ /* Characters didn't match. */
+ xBreakLoop = pdTRUE;
+ }
+ else if( cNextChar == ( char ) 0x00 )
+ {
+ /* Both strings terminated, a match must have been
+ found. */
+ pxReturn = pxNextTCB;
+ xBreakLoop = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ if( xBreakLoop != pdFALSE )
+ {
+ break;
+ }
+ }
+
+ if( pxReturn != NULL )
+ {
+ /* The handle has been found. */
+ break;
+ }
+
+ } while( pxNextTCB != pxFirstTCB );
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ return pxReturn;
+ }
+
+#endif /* INCLUDE_xTaskGetHandle */
+/*-----------------------------------------------------------*/\r
+\r
+#if ( INCLUDE_xTaskGetHandle == 1 )
+
+ /* Look up a task by name, searching the ready lists (highest priority
+ first), then the two delayed lists, then - as configured - the
+ suspended and pending-deletion lists. Returns NULL if no task with
+ that name exists. The scheduler is suspended for the duration of the
+ search. */
+ TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+ {
+ UBaseType_t uxQueue = configMAX_PRIORITIES;
+ TCB_t* pxTCB;
+
+ /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
+ configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
+
+ vTaskSuspendAll();
+ {
+ /* Search the ready lists. */
+ do
+ {
+ uxQueue--;
+ pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );
+
+ if( pxTCB != NULL )
+ {
+ /* Found the handle. */
+ break;
+ }
+
+ } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+
+ /* Search the delayed lists. */
+ if( pxTCB == NULL )
+ {
+ pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
+ }
+
+ if( pxTCB == NULL )
+ {
+ pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
+ }
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+ {
+ if( pxTCB == NULL )
+ {
+ /* Search the suspended list. */
+ pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
+ }
+ }
+ #endif
+
+ #if( INCLUDE_vTaskDelete == 1 )
+ {
+ if( pxTCB == NULL )
+ {
+ /* Search the deleted list. */
+ pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
+ }
+ }
+ #endif
+ }
+ ( void ) xTaskResumeAll();
+
+ return pxTCB;
+ }
+
+#endif /* INCLUDE_xTaskGetHandle */
+/*-----------------------------------------------------------*/\r
+\r
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+ /* Populate pxTaskStatusArray with a TaskStatus_t for every task in the
+ system, provided uxArraySize is large enough to hold them all. Returns
+ the number of array entries filled in ( 0 if the array was too small ).
+ If pulTotalRunTime is not NULL the total run time counter is also
+ reported through it ( 0 when run time stats are disabled ). */
+ UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime )
+ {
+ UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
+
+ vTaskSuspendAll();
+ {
+ /* Is there a space in the array for each task in the system? */
+ if( uxArraySize >= uxCurrentNumberOfTasks )
+ {
+ /* Fill in a TaskStatus_t structure with information on each
+ task in the Ready state. */
+ do
+ {
+ uxQueue--;
+ uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
+
+ } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+
+ /* Fill in a TaskStatus_t structure with information on each
+ task in the Blocked state. */
+ uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
+ uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );
+
+ #if( INCLUDE_vTaskDelete == 1 )
+ {
+ /* Fill in a TaskStatus_t structure with information on
+ each task that has been deleted but not yet cleaned up. */
+ uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
+ }
+ #endif
+
+ #if ( INCLUDE_vTaskSuspend == 1 )
+ {
+ /* Fill in a TaskStatus_t structure with information on
+ each task in the Suspended state. */
+ uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
+ }
+ #endif
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1)
+ {
+ if( pulTotalRunTime != NULL )
+ {
+ #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
+ portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
+ #else
+ *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
+ #endif
+ }
+ }
+ #else
+ {
+ if( pulTotalRunTime != NULL )
+ {
+ *pulTotalRunTime = 0;
+ }
+ }
+ #endif
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ ( void ) xTaskResumeAll();
+
+ return uxTask;
+ }
+
+#endif /* configUSE_TRACE_FACILITY */
+/*----------------------------------------------------------*/\r
+\r
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+ /* Return the handle of the idle task, which is created when the
+ scheduler is started. Asserts if called before then. */
+ TaskHandle_t xTaskGetIdleTaskHandle( void )
+ {
+ /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
+ started, then xIdleTaskHandle will be NULL. */
+ configASSERT( ( xIdleTaskHandle != NULL ) );
+ return xIdleTaskHandle;
+ }
+
+#endif /* INCLUDE_xTaskGetIdleTaskHandle */
+/*----------------------------------------------------------*/\r
+\r
+/* This conditional compilation should use inequality to 0, not equality to 1.
+This is to ensure vTaskStepTick() is available when user defined low power mode
+implementations require configUSE_TICKLESS_IDLE to be set to a value other than
+1. */
+#if ( configUSE_TICKLESS_IDLE != 0 )
+
+ /* Advance xTickCount by xTicksToJump tick periods that elapsed while
+ the tick interrupt was suppressed (tickless idle). Asserts that the
+ jump does not step past xNextTaskUnblockTime. */
+ void vTaskStepTick( const TickType_t xTicksToJump )
+ {
+ /* Correct the tick count value after a period during which the tick
+ was suppressed. Note this does *not* call the tick hook function for
+ each stepped tick. */
+ configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
+ xTickCount += xTicksToJump;
+ traceINCREASE_TICK_COUNT( xTicksToJump );
+ }
+
+#endif /* configUSE_TICKLESS_IDLE */
+/*----------------------------------------------------------*/\r
+\r
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ /* Force xTask out of the Blocked state before its timeout expires.
+ Returns pdPASS if the task was in the Blocked state and has been moved
+ to a ready list, pdFAIL otherwise. */
+ BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
+ {
+ TCB_t *pxTCB = xTask;
+ BaseType_t xReturn;
+
+ configASSERT( pxTCB );
+
+ vTaskSuspendAll();
+ {
+ /* A task can only be prematurely removed from the Blocked state if
+ it is actually in the Blocked state. */
+ if( eTaskGetState( xTask ) == eBlocked )
+ {
+ xReturn = pdPASS;
+
+ /* Remove the reference to the task from the blocked list. An
+ interrupt won't touch the xStateListItem because the
+ scheduler is suspended. */
+ ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
+
+ /* Is the task waiting on an event also? If so remove it from
+ the event list too. Interrupts can touch the event list item,
+ even though the scheduler is suspended, so a critical section
+ is used. */
+ taskENTER_CRITICAL();
+ {
+ if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
+ {
+ ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
+ pxTCB->ucDelayAborted = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ taskEXIT_CRITICAL();
+
+ /* Place the unblocked task into the appropriate ready list. */
+ prvAddTaskToReadyList( pxTCB );
+
+ /* A task being unblocked cannot cause an immediate context
+ switch if preemption is turned off. */
+ #if ( configUSE_PREEMPTION == 1 )
+ {
+ /* Preemption is on, but a context switch should only be
+ performed if the unblocked task has a priority that is
+ higher than the currently executing task. */
+ if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
+ {
+ /* Pend the yield to be performed when the scheduler
+ is unsuspended. */
+ xYieldPending = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* configUSE_PREEMPTION */
+ }
+ else
+ {
+ xReturn = pdFAIL;
+ }
+ }
+ ( void ) xTaskResumeAll();
+
+ return xReturn;
+ }
+
+#endif /* INCLUDE_xTaskAbortDelay */
+/*----------------------------------------------------------*/\r
+\r
+/* Called from the tick interrupt (or from the pended-tick unwind in
+xTaskResumeAll()). Increments the tick count, moves any tasks whose
+timeout has expired out of the Blocked state, and returns pdTRUE if a
+context switch should be performed. If the scheduler is suspended the
+tick is pended instead of being processed. */
+BaseType_t xTaskIncrementTick( void )
+{
+TCB_t * pxTCB;
+TickType_t xItemValue;
+BaseType_t xSwitchRequired = pdFALSE;
+
+ /* Called by the portable layer each time a tick interrupt occurs.
+ Increments the tick then checks to see if the new tick value will cause any
+ tasks to be unblocked. */
+ traceTASK_INCREMENT_TICK( xTickCount );
+ if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
+ {
+ /* Minor optimisation. The tick count cannot change in this
+ block. */
+ const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
+
+ /* Increment the RTOS tick, switching the delayed and overflowed
+ delayed lists if it wraps to 0. */
+ xTickCount = xConstTickCount;
+
+ if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */
+ {
+ taskSWITCH_DELAYED_LISTS();
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* See if this tick has made a timeout expire. Tasks are stored in
+ the queue in the order of their wake time - meaning once one task
+ has been found whose block time has not expired there is no need to
+ look any further down the list. */
+ if( xConstTickCount >= xNextTaskUnblockTime )
+ {
+ for( ;; )
+ {
+ if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
+ {
+ /* The delayed list is empty. Set xNextTaskUnblockTime
+ to the maximum possible value so it is extremely
+ unlikely that the
+ if( xTickCount >= xNextTaskUnblockTime ) test will pass
+ next time through. */
+ xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+ break;
+ }
+ else
+ {
+ /* The delayed list is not empty, get the value of the
+ item at the head of the delayed list. This is the time
+ at which the task at the head of the delayed list must
+ be removed from the Blocked state. */
+ pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
+ xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
+
+ if( xConstTickCount < xItemValue )
+ {
+ /* It is not time to unblock this item yet, but the
+ item value is the time at which the task at the head
+ of the blocked list must be removed from the Blocked
+ state - so record the item value in
+ xNextTaskUnblockTime. */
+ xNextTaskUnblockTime = xItemValue;
+ break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* It is time to remove the item from the Blocked state. */
+ ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
+
+ /* Is the task waiting on an event also? If so remove
+ it from the event list. */
+ if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
+ {
+ ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* Place the unblocked task into the appropriate ready
+ list. */
+ prvAddTaskToReadyList( pxTCB );
+
+ /* A task being unblocked cannot cause an immediate
+ context switch if preemption is turned off. */
+ #if ( configUSE_PREEMPTION == 1 )
+ {
+ /* Preemption is on, but a context switch should
+ only be performed if the unblocked task has a
+ priority that is equal to or higher than the
+ currently executing task. */
+ if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
+ {
+ xSwitchRequired = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* configUSE_PREEMPTION */
+ }
+ }
+ }
+
+ /* Tasks of equal priority to the currently running task will share
+ processing time (time slice) if preemption is on, and the application
+ writer has not explicitly turned time slicing off. */
+ #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
+ {
+ if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 )
+ {
+ xSwitchRequired = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
+
+ #if ( configUSE_TICK_HOOK == 1 )
+ {
+ /* Guard against the tick hook being called when the pended tick
+ count is being unwound (when the scheduler is being unlocked). */
+ if( uxPendedTicks == ( UBaseType_t ) 0U )
+ {
+ vApplicationTickHook();
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* configUSE_TICK_HOOK */
+ }
+ else
+ {
+ ++uxPendedTicks;
+
+ /* The tick hook gets called at regular intervals, even if the
+ scheduler is locked. */
+ #if ( configUSE_TICK_HOOK == 1 )
+ {
+ vApplicationTickHook();
+ }
+ #endif
+ }
+
+ #if ( configUSE_PREEMPTION == 1 )
+ {
+ if( xYieldPending != pdFALSE )
+ {
+ xSwitchRequired = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* configUSE_PREEMPTION */
+
+ return xSwitchRequired;
+}
+/*-----------------------------------------------------------*/\r
+\r
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ /* Store pxHookFunction as the tag ( hook ) value of xTask, or of the
+ calling task if xTask is NULL. */
+ void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
+ {
+ TCB_t *xTCB;
+
+ /* If xTask is NULL then it is the task hook of the calling task that is
+ getting set. */
+ if( xTask == NULL )
+ {
+ xTCB = ( TCB_t * ) pxCurrentTCB;
+ }
+ else
+ {
+ xTCB = xTask;
+ }
+
+ /* Save the hook function in the TCB. A critical section is required as
+ the value can be accessed from an interrupt. */
+ taskENTER_CRITICAL();
+ {
+ xTCB->pxTaskTag = pxHookFunction;
+ }
+ taskEXIT_CRITICAL();
+ }
+
+#endif /* configUSE_APPLICATION_TASK_TAG */
+/*-----------------------------------------------------------*/\r
+\r
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ /* Return the tag ( hook ) value of xTask, or of the calling task if
+ xTask is NULL. */
+ TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
+ {
+ TCB_t *pxTCB;
+ TaskHookFunction_t xReturn;
+
+ /* If xTask is NULL then the hook of the calling task is being queried. */
+ pxTCB = prvGetTCBFromHandle( xTask );
+
+ /* Read the hook function from the TCB. A critical section is required as
+ the value can be accessed from an interrupt. */
+ taskENTER_CRITICAL();
+ {
+ xReturn = pxTCB->pxTaskTag;
+ }
+ taskEXIT_CRITICAL();
+
+ return xReturn;
+ }
+
+#endif /* configUSE_APPLICATION_TASK_TAG */
+/*-----------------------------------------------------------*/\r
+\r
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ /* Version of xTaskGetApplicationTaskTag() that is safe to call from an
+ ISR - the read is protected with an interrupt mask rather than a
+ critical section. */
+ TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
+ {
+ TCB_t *pxTCB;
+ TaskHookFunction_t xReturn;
+ UBaseType_t uxSavedInterruptStatus;
+
+ /* If xTask is NULL then the hook of the calling task is being queried. */
+ pxTCB = prvGetTCBFromHandle( xTask );
+
+ /* Read the hook function from the TCB. Interrupts are masked as the
+ value can be accessed from another interrupt. */
+ uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+ {
+ xReturn = pxTCB->pxTaskTag;
+ }
+ portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+
+ return xReturn;
+ }
+
+#endif /* configUSE_APPLICATION_TASK_TAG */
+/*-----------------------------------------------------------*/\r
+\r
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+ /* Call the tag ( hook ) function of xTask - or of the calling task if
+ xTask is NULL - passing it pvParameter. Returns the hook's return
+ value, or pdFAIL if no hook is set. */
+ BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
+ {
+ TCB_t *xTCB;
+ BaseType_t xReturn;
+
+ /* If xTask is NULL then we are calling our own task hook. */
+ if( xTask == NULL )
+ {
+ xTCB = pxCurrentTCB;
+ }
+ else
+ {
+ xTCB = xTask;
+ }
+
+ if( xTCB->pxTaskTag != NULL )
+ {
+ xReturn = xTCB->pxTaskTag( pvParameter );
+ }
+ else
+ {
+ xReturn = pdFAIL;
+ }
+
+ return xReturn;
+ }
+
+#endif /* configUSE_APPLICATION_TASK_TAG */
+/*-----------------------------------------------------------*/\r
+\r
+/* Select the next task to run ( updating pxCurrentTCB ). If the
+scheduler is suspended the switch is held pending instead. Also, as
+configured: accumulates run time statistics, checks the outgoing task's
+stack for overflow, and swaps the per-task errno / newlib reentrancy
+state. */
+void vTaskSwitchContext( void )
+{
+ if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE )
+ {
+ /* The scheduler is currently suspended - do not allow a context
+ switch. */
+ xYieldPending = pdTRUE;
+ }
+ else
+ {
+ xYieldPending = pdFALSE;
+ traceTASK_SWITCHED_OUT();
+
+ #if ( configGENERATE_RUN_TIME_STATS == 1 )
+ {
+ #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
+ portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
+ #else
+ ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
+ #endif
+
+ /* Add the amount of time the task has been running to the
+ accumulated time so far. The time the task started running was
+ stored in ulTaskSwitchedInTime. Note that there is no overflow
+ protection here so count values are only valid until the timer
+ overflows. The guard against negative values is to protect
+ against suspect run time stat counter implementations - which
+ are provided by the application, not the kernel. */
+ if( ulTotalRunTime > ulTaskSwitchedInTime )
+ {
+ pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime );
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ ulTaskSwitchedInTime = ulTotalRunTime;
+ }
+ #endif /* configGENERATE_RUN_TIME_STATS */
+
+ /* Check for stack overflow, if configured. */
+ taskCHECK_FOR_STACK_OVERFLOW();
+
+ /* Before the currently running task is switched out, save its errno. */
+ #if( configUSE_POSIX_ERRNO == 1 )
+ {
+ pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
+ }
+ #endif
+
+ /* Select a new task to run using either the generic C or port
+ optimised asm code. */
+ taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
+ traceTASK_SWITCHED_IN();
+
+ /* After the new task is switched in, update the global errno. */
+ #if( configUSE_POSIX_ERRNO == 1 )
+ {
+ FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
+ }
+ #endif
+
+ #if ( configUSE_NEWLIB_REENTRANT == 1 )
+ {
+ /* Switch Newlib's _impure_ptr variable to point to the _reent
+ structure specific to this task. */
+ _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
+ }
+ #endif /* configUSE_NEWLIB_REENTRANT */
+ }
+}
+/*-----------------------------------------------------------*/\r
+\r
+/* Place the calling task on pxEventList ( in task priority order ) and
+block it for at most xTicksToWait ticks. */
+void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
+{
+ configASSERT( pxEventList );
+
+ /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
+ SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
+
+ /* Place the event list item of the TCB in the appropriate event list.
+ This is placed in the list in priority order so the highest priority task
+ is the first to be woken by the event. The queue that contains the event
+ list is locked, preventing simultaneous access from interrupts. */
+ vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
+
+ /* Move the calling task to the delayed list until the event occurs or
+ the xTicksToWait timeout expires. */
+ prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
+}
+/*-----------------------------------------------------------*/\r
+\r
+void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )\r
+{\r
+ configASSERT( pxEventList );\r
+\r
+ /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by\r
+ the event groups implementation. */\r
+ configASSERT( uxSchedulerSuspended != 0 );\r
+\r
+ /* Store the item value in the event list item. It is safe to access the\r
+ event list item here as interrupts won't access the event list item of a\r
+ task that is not in the Blocked state. */\r
+ listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );\r
+\r
+ /* Place the event list item of the TCB at the end of the appropriate event\r
+ list. It is safe to access the event list here because it is part of an\r
+ event group implementation - and interrupts don't access event groups\r
+ directly (instead they access them indirectly by pending function calls to\r
+ the task level). */\r
+ vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );\r
+\r
+ prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );\r
+}\r
+/*-----------------------------------------------------------*/\r
+\r
#if( configUSE_TIMERS == 1 )

	void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
	{
		configASSERT( pxEventList );

		/* Kernel internal only - hence the 'Restricted' in the name.  Not
		part of the public API, and MUST be called with the scheduler
		suspended. */

		/* Only a single task is ever expected to wait on pxEventList, so the
		cheaper vListInsertEnd() is used in place of the priority ordered
		vListInsert(). */
		vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );

		/* An indefinite wait is signalled to prvAddCurrentTaskToDelayedList()
		by a block time of portMAX_DELAY. */
		if( xWaitIndefinitely != pdFALSE )
		{
			xTicksToWait = portMAX_DELAY;
		}

		traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
		prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
	}

#endif /* configUSE_TIMERS */
+/*-----------------------------------------------------------*/\r
+\r
/*
 * Unblock the highest priority task waiting on pxEventList.  Returns pdTRUE
 * if the unblocked task has a higher priority than the calling task - in
 * which case the caller should request a context switch - otherwise pdFALSE.
 */
BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
{
TCB_t *pxUnblockedTCB;
BaseType_t xReturn;

	/* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION.  It can also be
	called from a critical section within an ISR. */

	/* The event list is sorted in priority order, so the first in the list can
	be removed as it is known to be the highest priority.  Remove the TCB from
	the delayed list, and add it to the ready list.

	If an event is for a queue that is locked then this function will never
	get called - the lock count on the queue will get modified instead.  This
	means exclusive access to the event list is guaranteed here.

	This function assumes that a check has already been made to ensure that
	pxEventList is not empty. */
	pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
	configASSERT( pxUnblockedTCB );
	( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );

	if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
	{
		/* The scheduler is running, so the state lists can be accessed
		directly - move the task from the delayed list to the ready list. */
		( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
		prvAddTaskToReadyList( pxUnblockedTCB );

		#if( configUSE_TICKLESS_IDLE != 0 )
		{
			/* If a task is blocked on a kernel object then xNextTaskUnblockTime
			might be set to the blocked task's time out time.  If the task is
			unblocked for a reason other than a timeout xNextTaskUnblockTime is
			normally left unchanged, because it is automatically reset to a new
			value when the tick count equals xNextTaskUnblockTime.  However if
			tickless idling is used it might be more important to enter sleep mode
			at the earliest possible time - so reset xNextTaskUnblockTime here to
			ensure it is updated at the earliest possible time. */
			prvResetNextTaskUnblockTime();
		}
		#endif
	}
	else
	{
		/* The delayed and ready lists cannot be accessed, so hold this task
		pending until the scheduler is resumed.  The task's event list item
		(just removed from the event list above) is reused as the link in the
		pending ready list. */
		vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
	}

	if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
	{
		/* Return true if the task removed from the event list has a higher
		priority than the calling task.  This allows the calling task to know if
		it should force a context switch now. */
		xReturn = pdTRUE;

		/* Mark that a yield is pending in case the user is not using the
		"xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
		xYieldPending = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
}
+/*-----------------------------------------------------------*/\r
+\r
+void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )\r
+{\r
+TCB_t *pxUnblockedTCB;\r
+\r
+ /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by\r
+ the event flags implementation. */\r
+ configASSERT( uxSchedulerSuspended != pdFALSE );\r
+\r
+ /* Store the new item value in the event list. */\r
+ listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );\r
+\r
+ /* Remove the event list form the event flag. Interrupts do not access\r
+ event flags. */\r
+ pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */\r
+ configASSERT( pxUnblockedTCB );\r
+ ( void ) uxListRemove( pxEventListItem );\r
+\r
+ /* Remove the task from the delayed list and add it to the ready list. The\r
+ scheduler is suspended so interrupts will not be accessing the ready\r
+ lists. */\r
+ ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );\r
+ prvAddTaskToReadyList( pxUnblockedTCB );\r
+\r
+ if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )\r
+ {\r
+ /* The unblocked task has a priority above that of the calling task, so\r
+ a context switch is required. This function is called with the\r
+ scheduler suspended so xYieldPending is set so the context switch\r
+ occurs immediately that the scheduler is resumed (unsuspended). */\r
+ xYieldPending = pdTRUE;\r
+ }\r
+}\r
+/*-----------------------------------------------------------*/\r
+\r
+void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )\r
+{\r
+ configASSERT( pxTimeOut );\r
+ taskENTER_CRITICAL();\r
+ {\r
+ pxTimeOut->xOverflowCount = xNumOfOverflows;\r
+ pxTimeOut->xTimeOnEntering = xTickCount;\r
+ }\r
+ taskEXIT_CRITICAL();\r
+}\r
+/*-----------------------------------------------------------*/\r
+\r
+void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )\r
+{\r
+ /* For internal use only as it does not use a critical section. */\r
+ pxTimeOut->xOverflowCount = xNumOfOverflows;\r
+ pxTimeOut->xTimeOnEntering = xTickCount;\r
+}\r
+/*-----------------------------------------------------------*/\r
+\r
/*
 * Returns pdTRUE if the timeout captured in pxTimeOut has expired given a
 * block time of *pxTicksToWait, otherwise pdFALSE.  When no timeout has
 * occurred, *pxTicksToWait is reduced by the time already elapsed and
 * pxTimeOut is restarted, so the caller can simply block again for the
 * remaining time.
 */
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
{
BaseType_t xReturn;

	configASSERT( pxTimeOut );
	configASSERT( pxTicksToWait );

	taskENTER_CRITICAL();
	{
		/* Minor optimisation.  The tick count cannot change in this block. */
		const TickType_t xConstTickCount = xTickCount;
		const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;

		#if( INCLUDE_xTaskAbortDelay == 1 )
		if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
		{
			/* The delay was aborted, which is not the same as a time out,
			but has the same result. */
			pxCurrentTCB->ucDelayAborted = pdFALSE;
			xReturn = pdTRUE;
		}
		else
		#endif

		#if ( INCLUDE_vTaskSuspend == 1 )
		if( *pxTicksToWait == portMAX_DELAY )
		{
			/* If INCLUDE_vTaskSuspend is set to 1 and the block time
			specified is the maximum block time then the task should block
			indefinitely, and therefore never time out. */
			xReturn = pdFALSE;
		}
		else
		#endif

		if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
		{
			/* The tick count has overflowed since vTaskSetTimeOut() was
			called, yet it is again greater than or equal to the time at
			which the timeout was set - so the tick count must have wrapped
			all the way around and gone past that time again.  More than the
			maximum possible block time has therefore elapsed: report a
			timeout. */
			xReturn = pdTRUE;
		}
		else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
		{
			/* Not a genuine timeout.  Adjust parameters for time remaining. */
			*pxTicksToWait -= xElapsedTime;
			vTaskInternalSetTimeOutState( pxTimeOut );
			xReturn = pdFALSE;
		}
		else
		{
			/* The block time has been exhausted. */
			*pxTicksToWait = 0;
			xReturn = pdTRUE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
+/*-----------------------------------------------------------*/\r
+\r
+void vTaskMissedYield( void )\r
+{\r
+ xYieldPending = pdTRUE;\r
+}\r
+/*-----------------------------------------------------------*/\r
+\r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Return the trace facility number previously assigned to xTask with
	vTaskSetTaskNumber(), or 0 when xTask is NULL. */
	UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
	{
	UBaseType_t uxReturn;
	TCB_t const *pxTCB;

		if( xTask == NULL )
		{
			uxReturn = 0U;
		}
		else
		{
			pxTCB = xTask;
			uxReturn = pxTCB->uxTaskNumber;
		}

		return uxReturn;
	}

#endif /* configUSE_TRACE_FACILITY */
+/*-----------------------------------------------------------*/\r
+\r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Assign a trace facility number to xTask.  A NULL handle is silently
	ignored. */
	void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle )
	{
	TCB_t * pxTCB;

		if( xTask == NULL )
		{
			return;
		}

		pxTCB = xTask;
		pxTCB->uxTaskNumber = uxHandle;
	}

#endif /* configUSE_TRACE_FACILITY */
+\r
+/*\r
+ * -----------------------------------------------------------\r
+ * The Idle task.\r
+ * ----------------------------------------------------------\r
+ *\r
+ * The portTASK_FUNCTION() macro is used to allow port/compiler specific\r
+ * language extensions. The equivalent prototype for this function is:\r
+ *\r
+ * void prvIdleTask( void *pvParameters );\r
+ *\r
+ */\r
+static portTASK_FUNCTION( prvIdleTask, pvParameters )\r
+{\r
+ /* Stop warnings. */\r
+ ( void ) pvParameters;\r
+\r
+ /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE\r
+ SCHEDULER IS STARTED. **/\r
+\r
+ /* In case a task that has a secure context deletes itself, in which case\r
+ the idle task is responsible for deleting the task's secure context, if\r
+ any. */\r
+ portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );\r
+\r
+ for( ;; )\r
+ {\r
+ /* See if any tasks have deleted themselves - if so then the idle task\r
+ is responsible for freeing the deleted task's TCB and stack. */\r
+ prvCheckTasksWaitingTermination();\r
+\r
+ #if ( configUSE_PREEMPTION == 0 )\r
+ {\r
+ /* If we are not using preemption we keep forcing a task switch to\r
+ see if any other task has become available. If we are using\r
+ preemption we don't need to do this as any task becoming available\r
+ will automatically get the processor anyway. */\r
+ taskYIELD();\r
+ }\r
+ #endif /* configUSE_PREEMPTION */\r
+\r
+ #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )\r
+ {\r
+ /* When using preemption tasks of equal priority will be\r
+ timesliced. If a task that is sharing the idle priority is ready\r
+ to run then the idle task should yield before the end of the\r
+ timeslice.\r
+\r
+ A critical region is not required here as we are just reading from\r
+ the list, and an occasional incorrect value will not matter. If\r
+ the ready list at the idle priority contains more than one task\r
+ then a task other than the idle task is ready to execute. */\r
+ if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )\r