2 * FreeRTOS Kernel V10.0.1
\r
3 * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
\r
5 * Permission is hereby granted, free of charge, to any person obtaining a copy of
\r
6 * this software and associated documentation files (the "Software"), to deal in
\r
7 * the Software without restriction, including without limitation the rights to
\r
8 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
\r
9 * the Software, and to permit persons to whom the Software is furnished to do so,
\r
10 * subject to the following conditions:
\r
12 * The above copyright notice and this permission notice shall be included in all
\r
13 * copies or substantial portions of the Software.
\r
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
\r
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
\r
17 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
\r
18 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
\r
19 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
\r
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
22 * http://www.FreeRTOS.org
\r
23 * http://aws.amazon.com/freertos
\r
25 * 1 tab == 4 spaces!
\r
28 /* Standard includes. */
\r
32 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
\r
33 all the API functions to use the MPU wrappers. That should only be done when
\r
34 task.h is included from an application file. */
\r
35 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
\r
37 /* FreeRTOS includes. */
\r
38 #include "FreeRTOS.h"
\r
41 #include "stack_macros.h"
\r
43 /* Lint e961 and e750 are suppressed as a MISRA exception justified because the
\r
44 MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
\r
45 header files above, but not in this file, in order to generate the correct
\r
46 privileged Vs unprivileged linkage and placement. */
\r
47 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
\r
/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
functions but without including stdio.h here. */
#if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
	/* At the bottom of this file are two optional functions that can be used
	to generate human readable text from the raw data generated by the
	uxTaskGetSystemState() function.  Note the formatting functions are provided
	for convenience only, and are NOT considered part of the kernel. */
	#include <stdio.h>
#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
\r
#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define taskYIELD_IF_USING_PREEMPTION()
#else
	#define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
\r
/* Values that can be assigned to the ucNotifyState member of the TCB. */
#define taskNOT_WAITING_NOTIFICATION	( ( uint8_t ) 0 )
#define taskWAITING_NOTIFICATION		( ( uint8_t ) 1 )
#define taskNOTIFICATION_RECEIVED		( ( uint8_t ) 2 )

/*
 * The value used to fill the stack of a task when the task is created.  This
 * is used purely for checking the high water mark for tasks.
 */
#define tskSTACK_FILL_BYTE	( 0xa5U )
\r
/* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using
dynamically allocated RAM, in which case when any task is deleted it is known
that both the task's stack and TCB need to be freed.  Sometimes the
FreeRTOSConfig.h settings only allow a task to be created using statically
allocated RAM, in which case when any task is deleted it is known that neither
the task's stack or TCB should be freed.  Sometimes the FreeRTOSConfig.h
settings allow a task to be created using either statically or dynamically
allocated RAM, in which case a member of the TCB is used to record whether the
stack and/or TCB were allocated statically or dynamically, so when a task is
deleted the RAM that was allocated dynamically is freed again and no attempt is
made to free the RAM that was allocated statically.
tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for a
task to be created using either statically or dynamically allocated RAM.  Note
that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created with
a statically allocated stack and a dynamically allocated TCB.
!!!NOTE!!! If the definition of tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is
changed then the definition of StaticTask_t must also be updated. */
#define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE	( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB		( ( uint8_t ) 0 )
#define tskSTATICALLY_ALLOCATED_STACK_ONLY			( ( uint8_t ) 1 )
#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB		( ( uint8_t ) 2 )
\r
/* If any of the following are set then task stacks are filled with a known
value so the high water mark can be determined.  If none of the following are
set then don't fill the stack so there is no unnecessary dependency on memset. */
#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )
	#define tskSET_NEW_STACKS_TO_KNOWN_VALUE	1
#else
	#define tskSET_NEW_STACKS_TO_KNOWN_VALUE	0
#endif
\r
/*
 * Macros used by vListTask to indicate which state a task is in.
 */
#define tskRUNNING_CHAR		( 'X' )
#define tskBLOCKED_CHAR		( 'B' )
#define tskREADY_CHAR		( 'R' )
#define tskDELETED_CHAR		( 'D' )
#define tskSUSPENDED_CHAR	( 'S' )
\r
/*
 * Some kernel aware debuggers require the data the debugger needs access to be
 * global, rather than file scope.
 */
#ifdef portREMOVE_STATIC_QUALIFIER
	#define static
#endif

/* The name allocated to the Idle task.  This can be overridden by defining
configIDLE_TASK_NAME in FreeRTOSConfig.h. */
#ifndef configIDLE_TASK_NAME
	#define configIDLE_TASK_NAME "IDLE"
#endif
\r
#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )

	/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
	performed in a generic way that is not optimised to any particular
	microcontroller architecture. */

	/* uxTopReadyPriority holds the priority of the highest priority ready
	state task. */
	#define taskRECORD_READY_PRIORITY( uxPriority )														\
	{																									\
		if( ( uxPriority ) > uxTopReadyPriority )														\
		{																								\
			uxTopReadyPriority = ( uxPriority );														\
		}																								\
	} /* taskRECORD_READY_PRIORITY */

	/*-----------------------------------------------------------*/

	#define taskSELECT_HIGHEST_PRIORITY_TASK()															\
	{																									\
	UBaseType_t uxTopPriority = uxTopReadyPriority;														\
																										\
		/* Find the highest priority queue that contains ready tasks. */								\
		while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) )							\
		{																								\
			configASSERT( uxTopPriority );																\
			--uxTopPriority;																			\
		}																								\
																										\
		/* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of						\
		the	same priority get an equal share of the processor time. */									\
		listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) );			\
		uxTopReadyPriority = uxTopPriority;																\
	} /* taskSELECT_HIGHEST_PRIORITY_TASK */

	/*-----------------------------------------------------------*/

	/* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
	they are only required when a port optimised method of task selection is
	being used. */
	#define taskRESET_READY_PRIORITY( uxPriority )
	#define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )

#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */

	/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
	performed in a way that is tailored to the particular microcontroller
	architecture being used. */

	/* A port optimised version is provided.  Call the port defined macros. */
	#define taskRECORD_READY_PRIORITY( uxPriority )	portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority )

	/*-----------------------------------------------------------*/

	#define taskSELECT_HIGHEST_PRIORITY_TASK()														\
	{																								\
	UBaseType_t uxTopPriority;																		\
																									\
		/* Find the highest priority list that contains ready tasks. */								\
		portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority );								\
		configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 );		\
		listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) );		\
	} /* taskSELECT_HIGHEST_PRIORITY_TASK() */

	/*-----------------------------------------------------------*/

	/* A port optimised version is provided, call it only if the TCB being reset
	is being referenced from a ready list.  If it is referenced from a delayed
	or suspended list then it won't be in a ready list. */
	#define taskRESET_READY_PRIORITY( uxPriority )														\
	{																									\
		if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 )	\
		{																								\
			portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) );							\
		}																								\
	}

#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */

/*-----------------------------------------------------------*/
\r
/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
count overflows. */
#define taskSWITCH_DELAYED_LISTS()												\
{																				\
List_t *pxTemp;																	\
																				\
	/* The delayed tasks list should be empty when the lists are switched. */	\
	configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) );					\
																				\
	pxTemp = pxDelayedTaskList;													\
	pxDelayedTaskList = pxOverflowDelayedTaskList;								\
	pxOverflowDelayedTaskList = pxTemp;											\
	xNumOfOverflows++;															\
	prvResetNextTaskUnblockTime();												\
}

/*-----------------------------------------------------------*/
\r
/*
 * Place the task represented by pxTCB into the appropriate ready list for
 * the task.  It is inserted at the end of the list.
 */
#define prvAddTaskToReadyList( pxTCB )																\
	traceMOVED_TASK_TO_READY_STATE( pxTCB );														\
	taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority );												\
	vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
	tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB )
/*-----------------------------------------------------------*/
\r
/*
 * Several functions take an TaskHandle_t parameter that can optionally be NULL,
 * where NULL is used to indicate that the handle of the currently executing
 * task should be used in place of the parameter.  This macro simply checks to
 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
 */
#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? ( TCB_t * ) pxCurrentTCB : ( TCB_t * ) ( pxHandle ) )
\r
/* The item value of the event list item is normally used to hold the priority
of the task to which it belongs (coded to allow it to be held in reverse
priority order).  However, it is occasionally borrowed for other purposes.  It
is important its value is not updated due to a task priority change while it is
being used for another purpose.  The following bit definition is used to inform
the scheduler that the value should not be changed - in which case it is the
responsibility of whichever module is using the value to ensure it gets set back
to its original value when it is released. */
#if( configUSE_16_BIT_TICKS == 1 )
	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x8000U
#else
	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x80000000UL
#endif
\r
265 * Task control block. A task control block (TCB) is allocated for each task,
\r
266 * and stores task state information, including a pointer to the task's context
\r
267 * (the task's run time environment, including register values)
\r
269 typedef struct tskTaskControlBlock
\r
271 volatile StackType_t *pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
\r
273 #if ( portUSING_MPU_WRAPPERS == 1 )
\r
274 xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
\r
277 ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
\r
278 ListItem_t xEventListItem; /*< Used to reference a task from an event list. */
\r
279 UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */
\r
280 StackType_t *pxStack; /*< Points to the start of the stack. */
\r
281 char pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
283 #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
\r
284 StackType_t *pxEndOfStack; /*< Points to the highest valid address for the stack. */
\r
287 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
\r
288 UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
\r
291 #if ( configUSE_TRACE_FACILITY == 1 )
\r
292 UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
\r
293 UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
\r
296 #if ( configUSE_MUTEXES == 1 )
\r
297 UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
\r
298 UBaseType_t uxMutexesHeld;
\r
301 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
\r
302 TaskHookFunction_t pxTaskTag;
\r
305 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
\r
306 void *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
\r
309 #if( configGENERATE_RUN_TIME_STATS == 1 )
\r
310 uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
\r
313 #if ( configUSE_NEWLIB_REENTRANT == 1 )
\r
314 /* Allocate a Newlib reent structure that is specific to this task.
\r
315 Note Newlib support has been included by popular demand, but is not
\r
316 used by the FreeRTOS maintainers themselves. FreeRTOS is not
\r
317 responsible for resulting newlib operation. User must be familiar with
\r
318 newlib and must provide system-wide implementations of the necessary
\r
319 stubs. Be warned that (at the time of writing) the current newlib design
\r
320 implements a system-wide malloc() that must be provided with locks. */
\r
321 struct _reent xNewLib_reent;
\r
324 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
325 volatile uint32_t ulNotifiedValue;
\r
326 volatile uint8_t ucNotifyState;
\r
329 /* See the comments above the definition of
\r
330 tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
\r
331 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 Macro has been consolidated for readability reasons. */
\r
332 uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
\r
335 #if( INCLUDE_xTaskAbortDelay == 1 )
\r
336 uint8_t ucDelayAborted;
\r
341 /* The old tskTCB name is maintained above then typedefed to the new TCB_t name
\r
342 below to enable the use of older kernel aware debuggers. */
\r
343 typedef tskTCB TCB_t;
\r
345 /*lint -save -e956 A manual analysis and inspection has been used to determine
\r
346 which static variables must be declared volatile. */
\r
348 PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;
\r
350 /* Lists for ready and blocked tasks. --------------------*/
\r
351 PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];/*< Prioritised ready tasks. */
\r
352 PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
\r
353 PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
\r
354 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
\r
355 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
\r
356 PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
\r
358 #if( INCLUDE_vTaskDelete == 1 )
\r
360 PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */
\r
361 PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
\r
365 #if ( INCLUDE_vTaskSuspend == 1 )
\r
367 PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */
\r
371 /* Other file private variables. --------------------------------*/
\r
372 PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
\r
373 PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
\r
374 PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
\r
375 PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
\r
376 PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U;
\r
377 PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE;
\r
378 PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
\r
379 PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
\r
380 PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
\r
381 PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
\r
383 /* Context switches are held pending while the scheduler is suspended. Also,
\r
384 interrupts must not manipulate the xStateListItem of a TCB, or any of the
\r
385 lists the xStateListItem can be referenced from, if the scheduler is suspended.
\r
386 If an interrupt needs to unblock a task while the scheduler is suspended then it
\r
387 moves the task's event list item into the xPendingReadyList, ready for the
\r
388 kernel to move the task from the pending ready list into the real ready list
\r
389 when the scheduler is unsuspended. The pending ready list itself can only be
\r
390 accessed from a critical section. */
\r
391 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE;
\r
393 #if ( configGENERATE_RUN_TIME_STATS == 1 )
\r
395 PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */
\r
396 PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */
\r
/*-----------------------------------------------------------*/

/* Callback function prototypes. --------------------------*/
#if( configCHECK_FOR_STACK_OVERFLOW > 0 )

	extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );

#endif

#if( configUSE_TICK_HOOK > 0 )

	extern void vApplicationTickHook( void );

#endif

#if( configSUPPORT_STATIC_ALLOCATION == 1 )

	extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize );

#endif
\r
423 /* File private functions. --------------------------------*/
\r
426 * Utility task that simply returns pdTRUE if the task referenced by xTask is
\r
427 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
\r
428 * is in any other state.
\r
430 #if ( INCLUDE_vTaskSuspend == 1 )
\r
432 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
\r
434 #endif /* INCLUDE_vTaskSuspend */
\r
437 * Utility to ready all the lists used by the scheduler. This is called
\r
438 * automatically upon the creation of the first task.
\r
440 static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
\r
443 * The idle task, which as all tasks is implemented as a never ending loop.
\r
444 * The idle task is automatically created and added to the ready lists upon
\r
445 * creation of the first user task.
\r
447 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
\r
448 * language extensions. The equivalent prototype for this function is:
\r
450 * void prvIdleTask( void *pvParameters );
\r
453 static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters );
\r
456 * Utility to free all memory allocated by the scheduler to hold a TCB,
\r
457 * including the stack pointed to by the TCB.
\r
459 * This does not free memory allocated by the task itself (i.e. memory
\r
460 * allocated by calls to pvPortMalloc from within the tasks application code).
\r
462 #if ( INCLUDE_vTaskDelete == 1 )
\r
464 static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION;
\r
469 * Used only by the idle task. This checks to see if anything has been placed
\r
470 * in the list of tasks waiting to be deleted. If so the task is cleaned up
\r
471 * and its TCB deleted.
\r
473 static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
\r
476 * The currently executing task is entering the Blocked state. Add the task to
\r
477 * either the current or the overflow delayed task list.
\r
479 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION;
\r
482 * Fills an TaskStatus_t structure with information on each task that is
\r
483 * referenced from the pxList list (which may be a ready list, a delayed list,
\r
484 * a suspended list, etc.).
\r
486 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
\r
487 * NORMAL APPLICATION CODE.
\r
489 #if ( configUSE_TRACE_FACILITY == 1 )
\r
491 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION;
\r
496 * Searches pxList for a task with name pcNameToQuery - returning a handle to
\r
497 * the task if it is found, or NULL if the task is not found.
\r
499 #if ( INCLUDE_xTaskGetHandle == 1 )
\r
501 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;
\r
506 * When a task is created, the stack of the task is filled with a known value.
\r
507 * This function determines the 'high water mark' of the task stack by
\r
508 * determining how much of the stack remains at the original preset value.
\r
510 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )
\r
512 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
\r
517 * Return the amount of time, in ticks, that will pass before the kernel will
\r
518 * next move a task from the Blocked state to the Running state.
\r
520 * This conditional compilation should use inequality to 0, not equality to 1.
\r
521 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
\r
522 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
\r
523 * set to a value other than 1.
\r
525 #if ( configUSE_TICKLESS_IDLE != 0 )
\r
527 static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
\r
532 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
\r
533 * will exit the Blocked state.
\r
535 static void prvResetNextTaskUnblockTime( void );
\r
537 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
\r
540 * Helper function used to pad task names with spaces when printing out
\r
541 * human readable tables of task information.
\r
543 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) PRIVILEGED_FUNCTION;
\r
548 * Called after a Task_t structure has been allocated either statically or
\r
549 * dynamically to fill in the structure's members.
\r
551 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
\r
552 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
553 const uint32_t ulStackDepth,
\r
554 void * const pvParameters,
\r
555 UBaseType_t uxPriority,
\r
556 TaskHandle_t * const pxCreatedTask,
\r
558 const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;
\r
561 * Called after a new task has been created and initialised to place the task
\r
562 * under the control of the scheduler.
\r
564 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION;
\r
567 * freertos_tasks_c_additions_init() should only be called if the user definable
\r
568 * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
\r
569 * called by the function.
\r
571 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
\r
573 static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;
\r
/*-----------------------------------------------------------*/

#if( configSUPPORT_STATIC_ALLOCATION == 1 )

	TaskHandle_t xTaskCreateStatic(	TaskFunction_t pxTaskCode,
									const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
									const uint32_t ulStackDepth,
									void * const pvParameters,
									UBaseType_t uxPriority,
									StackType_t * const puxStackBuffer,
									StaticTask_t * const pxTaskBuffer )
	{
	TCB_t *pxNewTCB;
	TaskHandle_t xReturn;

		configASSERT( puxStackBuffer != NULL );
		configASSERT( pxTaskBuffer != NULL );

		#if( configASSERT_DEFINED == 1 )
		{
			/* Sanity check that the size of the structure used to declare a
			variable of type StaticTask_t equals the size of the real task
			structure. */
			volatile size_t xSize = sizeof( StaticTask_t );
			configASSERT( xSize == sizeof( TCB_t ) );
		}
		#endif /* configASSERT_DEFINED */

		if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
		{
			/* The memory used for the task's TCB and stack are passed into this
			function - use them. */
			pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
			pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;

			#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 Macro has been consolidated for readability reasons. */
			{
				/* Tasks can be created statically or dynamically, so note this
				task was created statically in case the task is later deleted. */
				pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
			}
			#endif /* configSUPPORT_DYNAMIC_ALLOCATION */

			prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL );
			prvAddNewTaskToReadyList( pxNewTCB );
		}
		else
		{
			xReturn = NULL;
		}

		return xReturn;
	}

#endif /* SUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
\r
#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

	BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
	{
	TCB_t *pxNewTCB;
	BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

		configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
		configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );

		if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
		{
			/* Allocate space for the TCB.  Where the memory comes from depends
			on the implementation of the port malloc function and whether or
			not static allocation is being used. */
			pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;

			/* Store the stack location in the TCB. */
			pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

			#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
			{
				/* Tasks can be created statically or dynamically, so note this
				task was created statically in case the task is later deleted. */
				pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
			}
			#endif /* configSUPPORT_DYNAMIC_ALLOCATION */

			prvInitialiseNewTask(	pxTaskDefinition->pvTaskCode,
									pxTaskDefinition->pcName,
									( uint32_t ) pxTaskDefinition->usStackDepth,
									pxTaskDefinition->pvParameters,
									pxTaskDefinition->uxPriority,
									pxCreatedTask, pxNewTCB,
									pxTaskDefinition->xRegions );

			prvAddNewTaskToReadyList( pxNewTCB );
			xReturn = pdPASS;
		}

		return xReturn;
	}

#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
/*-----------------------------------------------------------*/
\r
#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

	BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
	{
	TCB_t *pxNewTCB;
	BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

		configASSERT( pxTaskDefinition->puxStackBuffer );

		if( pxTaskDefinition->puxStackBuffer != NULL )
		{
			/* Allocate space for the TCB.  Where the memory comes from depends
			on the implementation of the port malloc function and whether or
			not static allocation is being used. */
			pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );

			if( pxNewTCB != NULL )
			{
				/* Store the stack location in the TCB. */
				pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

				#if( configSUPPORT_STATIC_ALLOCATION == 1 )
				{
					/* Tasks can be created statically or dynamically, so note
					this task had a statically allocated stack in case it is
					later deleted.  The TCB was allocated dynamically. */
					pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
				}
				#endif /* configSUPPORT_STATIC_ALLOCATION */

				prvInitialiseNewTask(	pxTaskDefinition->pvTaskCode,
										pxTaskDefinition->pcName,
										( uint32_t ) pxTaskDefinition->usStackDepth,
										pxTaskDefinition->pvParameters,
										pxTaskDefinition->uxPriority,
										pxCreatedTask, pxNewTCB,
										pxTaskDefinition->xRegions );

				prvAddNewTaskToReadyList( pxNewTCB );
				xReturn = pdPASS;
			}
		}

		return xReturn;
	}

#endif /* portUSING_MPU_WRAPPERS */
/*-----------------------------------------------------------*/
\r
/* xTaskCreate(): dynamically allocates a TCB and a task stack, initialises
them via prvInitialiseNewTask(), and adds the task to the ready list.
Returns pdPASS on success or errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY if either
allocation fails.
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
730 #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
\r
732 BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
\r
733 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
734 const configSTACK_DEPTH_TYPE usStackDepth,
\r
735 void * const pvParameters,
\r
736 UBaseType_t uxPriority,
\r
737 TaskHandle_t * const pxCreatedTask )
\r
740 BaseType_t xReturn;
\r
742 /* If the stack grows down then allocate the stack then the TCB so the stack
\r
743 does not grow into the TCB. Likewise if the stack grows up then allocate
\r
744 the TCB then the stack. */
\r
745 #if( portSTACK_GROWTH > 0 )
\r
747 /* Allocate space for the TCB. Where the memory comes from depends on
\r
748 the implementation of the port malloc function and whether or not static
\r
749 allocation is being used. */
\r
750 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
\r
752 if( pxNewTCB != NULL )
\r
754 /* Allocate space for the stack used by the task being created.
\r
755 The base of the stack memory stored in the TCB so the task can
\r
756 be deleted later if required. */
\r
757 pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
759 if( pxNewTCB->pxStack == NULL )
\r
761 /* Could not allocate the stack. Delete the allocated TCB. */
\r
762 vPortFree( pxNewTCB );
\r
767 #else /* portSTACK_GROWTH */
\r
769 StackType_t *pxStack;
\r
771 /* Allocate space for the stack used by the task being created. */
\r
772 pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
774 if( pxStack != NULL )
\r
776 /* Allocate space for the TCB. */
\r
777 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e961 MISRA exception as the casts are only redundant for some paths. */
\r
779 if( pxNewTCB != NULL )
\r
781 /* Store the stack location in the TCB. */
\r
782 pxNewTCB->pxStack = pxStack;
\r
786 /* The stack cannot be used as the TCB was not created. Free
\r
788 vPortFree( pxStack );
\r
796 #endif /* portSTACK_GROWTH */
\r
/* Both allocations succeeded on whichever path was compiled in. */
798 if( pxNewTCB != NULL )
\r
800 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 Macro has been consolidated for readability reasons. */
\r
802 /* Tasks can be created statically or dynamically, so note this
\r
803 task was created dynamically in case it is later deleted. */
\r
804 pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
\r
806 #endif /* configSUPPORT_STATIC_ALLOCATION */
\r
808 prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
\r
809 prvAddNewTaskToReadyList( pxNewTCB );
\r
/* Allocation failed on some path above — report out-of-memory. */
814 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
\r
820 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
\r
821 /*-----------------------------------------------------------*/
\r
/* prvInitialiseNewTask(): common initialisation used by all task-creation
variants.  Fills in the TCB fields (name, priority, list items, optional
per-feature members), prepares the task stack so it looks like the task was
already running and was interrupted, and passes the new handle out through
pxCreatedTask.
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
823 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
\r
824 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
825 const uint32_t ulStackDepth,
\r
826 void * const pvParameters,
\r
827 UBaseType_t uxPriority,
\r
828 TaskHandle_t * const pxCreatedTask,
\r
830 const MemoryRegion_t * const xRegions )
\r
832 StackType_t *pxTopOfStack;
\r
835 #if( portUSING_MPU_WRAPPERS == 1 )
\r
836 /* Should the task be created in privileged mode? */
\r
837 BaseType_t xRunPrivileged;
\r
838 if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
\r
840 xRunPrivileged = pdTRUE;
\r
844 xRunPrivileged = pdFALSE;
\r
/* Strip the privilege flag so only the numeric priority remains. */
846 uxPriority &= ~portPRIVILEGE_BIT;
\r
847 #endif /* portUSING_MPU_WRAPPERS == 1 */
\r
849 configASSERT( pcName );
\r
851 /* Avoid dependency on memset() if it is not required. */
\r
852 #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
\r
854 /* Fill the stack with a known value to assist debugging. */
\r
855 ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
\r
857 #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */
\r
859 /* Calculate the top of stack address. This depends on whether the stack
\r
860 grows from high memory to low (as per the 80x86) or vice versa.
\r
861 portSTACK_GROWTH is used to make the result positive or negative as required
\r
863 #if( portSTACK_GROWTH < 0 )
\r
865 pxTopOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
\r
/* Round the top of stack down to the required byte alignment. */
866 pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. */
\r
868 /* Check the alignment of the calculated top of stack is correct. */
\r
869 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
\r
871 #if( configRECORD_STACK_HIGH_ADDRESS == 1 )
\r
873 /* Also record the stack's high address, which may assist
\r
875 pxNewTCB->pxEndOfStack = pxTopOfStack;
\r
877 #endif /* configRECORD_STACK_HIGH_ADDRESS */
\r
879 #else /* portSTACK_GROWTH */
\r
881 pxTopOfStack = pxNewTCB->pxStack;
\r
883 /* Check the alignment of the stack buffer is correct. */
\r
884 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
\r
886 /* The other extreme of the stack space is required if stack checking is
\r
888 pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
\r
890 #endif /* portSTACK_GROWTH */
\r
892 /* Store the task name in the TCB. */
\r
893 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
\r
895 pxNewTCB->pcTaskName[ x ] = pcName[ x ];
\r
897 /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
\r
898 configMAX_TASK_NAME_LEN characters just in case the memory after the
\r
899 string is not accessible (extremely unlikely). */
\r
900 if( pcName[ x ] == 0x00 )
\r
906 mtCOVERAGE_TEST_MARKER();
\r
910 /* Ensure the name string is terminated in the case that the string length
\r
911 was greater or equal to configMAX_TASK_NAME_LEN. */
\r
912 pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
\r
914 /* This is used as an array index so must ensure it's not too large. First
\r
915 remove the privilege bit if one is present. */
\r
916 if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
\r
/* Clamp to the highest valid priority. */
918 uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
\r
922 mtCOVERAGE_TEST_MARKER();
\r
925 pxNewTCB->uxPriority = uxPriority;
\r
926 #if ( configUSE_MUTEXES == 1 )
\r
/* uxBasePriority is the un-inherited priority used by priority
inheritance; the task starts holding no mutexes. */
928 pxNewTCB->uxBasePriority = uxPriority;
\r
929 pxNewTCB->uxMutexesHeld = 0;
\r
931 #endif /* configUSE_MUTEXES */
\r
933 vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
\r
934 vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
\r
936 /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
\r
937 back to the containing TCB from a generic item in a list. */
\r
938 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );
\r
940 /* Event lists are always in priority order. */
\r
941 listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
942 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
\r
944 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
\r
946 pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U;
\r
948 #endif /* portCRITICAL_NESTING_IN_TCB */
\r
950 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
\r
952 pxNewTCB->pxTaskTag = NULL;
\r
954 #endif /* configUSE_APPLICATION_TASK_TAG */
\r
956 #if ( configGENERATE_RUN_TIME_STATS == 1 )
\r
958 pxNewTCB->ulRunTimeCounter = 0UL;
\r
960 #endif /* configGENERATE_RUN_TIME_STATS */
\r
962 #if ( portUSING_MPU_WRAPPERS == 1 )
\r
964 vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
\r
968 /* Avoid compiler warning about unreferenced parameter. */
\r
973 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
\r
975 for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
\r
977 pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL;
\r
982 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
\r
984 pxNewTCB->ulNotifiedValue = 0;
\r
985 pxNewTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
\r
989 #if ( configUSE_NEWLIB_REENTRANT == 1 )
\r
991 /* Initialise this task's Newlib reent structure. */
\r
992 _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) );
\r
996 #if( INCLUDE_xTaskAbortDelay == 1 )
\r
998 pxNewTCB->ucDelayAborted = pdFALSE;
\r
1002 /* Initialize the TCB stack to look as if the task was already running,
\r
1003 but had been interrupted by the scheduler. The return address is set
\r
1004 to the start of the task function. Once the stack has been initialised
\r
1005 the top of stack variable is updated. */
\r
1006 #if( portUSING_MPU_WRAPPERS == 1 )
\r
1008 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
\r
1010 #else /* portUSING_MPU_WRAPPERS */
\r
1012 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
\r
1014 #endif /* portUSING_MPU_WRAPPERS */
\r
1016 if( ( void * ) pxCreatedTask != NULL )
\r
1018 /* Pass the handle out in an anonymous way. The handle can be used to
\r
1019 change the created task's priority, delete the created task, etc.*/
\r
1020 *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
\r
1024 mtCOVERAGE_TEST_MARKER();
\r
1027 /*-----------------------------------------------------------*/
\r
/* prvAddNewTaskToReadyList(): inside a critical section, accounts for the
new task, selects it as pxCurrentTCB if appropriate (no current task, or the
scheduler is not yet running and it has the highest priority so far), adds it
to the ready list, then yields if it pre-empts the running task.
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
1029 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB )
\r
1031 /* Ensure interrupts don't access the task lists while the lists are being
\r
1033 taskENTER_CRITICAL();
\r
1035 uxCurrentNumberOfTasks++;
\r
1036 if( pxCurrentTCB == NULL )
\r
1038 /* There are no other tasks, or all the other tasks are in
\r
1039 the suspended state - make this the current task. */
\r
1040 pxCurrentTCB = pxNewTCB;
\r
1042 if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
\r
1044 /* This is the first task to be created so do the preliminary
\r
1045 initialisation required. We will not recover if this call
\r
1046 fails, but we will report the failure. */
\r
1047 prvInitialiseTaskLists();
\r
1051 mtCOVERAGE_TEST_MARKER();
\r
1056 /* If the scheduler is not already running, make this task the
\r
1057 current task if it is the highest priority task to be created
\r
1059 if( xSchedulerRunning == pdFALSE )
\r
1061 if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority )
\r
1063 pxCurrentTCB = pxNewTCB;
\r
1067 mtCOVERAGE_TEST_MARKER();
\r
1072 mtCOVERAGE_TEST_MARKER();
\r
1078 #if ( configUSE_TRACE_FACILITY == 1 )
\r
1080 /* Add a counter into the TCB for tracing only. */
\r
1081 pxNewTCB->uxTCBNumber = uxTaskNumber;
\r
1083 #endif /* configUSE_TRACE_FACILITY */
\r
1084 traceTASK_CREATE( pxNewTCB );
\r
1086 prvAddTaskToReadyList( pxNewTCB );
\r
1088 portSETUP_TCB( pxNewTCB );
\r
1090 taskEXIT_CRITICAL();
\r
1092 if( xSchedulerRunning != pdFALSE )
\r
1094 /* If the created task is of a higher priority than the current task
\r
1095 then it should run now. */
\r
1096 if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority )
\r
1098 taskYIELD_IF_USING_PREEMPTION();
\r
1102 mtCOVERAGE_TEST_MARKER();
\r
1107 mtCOVERAGE_TEST_MARKER();
\r
1110 /*-----------------------------------------------------------*/
\r
/* vTaskDelete(): removes a task from all kernel lists.  A task deleting
itself is parked on xTasksWaitingTermination for the idle task to clean up
(its stack is still in use until the context switch); deleting another task
frees its TCB immediately via prvDeleteTCB().  NULL means "the calling task".
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
1112 #if ( INCLUDE_vTaskDelete == 1 )
\r
1114 void vTaskDelete( TaskHandle_t xTaskToDelete )
\r
1118 taskENTER_CRITICAL();
\r
1120 /* If null is passed in here then it is the calling task that is
\r
1122 pxTCB = prvGetTCBFromHandle( xTaskToDelete );
\r
1124 /* Remove task from the ready list. */
\r
1125 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
/* That was the last ready task at this priority level. */
1127 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
\r
1131 mtCOVERAGE_TEST_MARKER();
\r
1134 /* Is the task waiting on an event also? */
\r
1135 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
\r
1137 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
1141 mtCOVERAGE_TEST_MARKER();
\r
1144 /* Increment the uxTaskNumber also so kernel aware debuggers can
\r
1145 detect that the task lists need re-generating. This is done before
\r
1146 portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
\r
1150 if( pxTCB == pxCurrentTCB )
\r
1152 /* A task is deleting itself. This cannot complete within the
\r
1153 task itself, as a context switch to another task is required.
\r
1154 Place the task in the termination list. The idle task will
\r
1155 check the termination list and free up any memory allocated by
\r
1156 the scheduler for the TCB and stack of the deleted task. */
\r
1157 vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );
\r
1159 /* Increment the ucTasksDeleted variable so the idle task knows
\r
1160 there is a task that has been deleted and that it should therefore
\r
1161 check the xTasksWaitingTermination list. */
\r
1162 ++uxDeletedTasksWaitingCleanUp;
\r
1164 /* The pre-delete hook is primarily for the Windows simulator,
\r
1165 in which Windows specific clean up operations are performed,
\r
1166 after which it is not possible to yield away from this task -
\r
1167 hence xYieldPending is used to latch that a context switch is
\r
1169 portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
\r
/* Deleting a different task: reclaim its memory right away. */
1173 --uxCurrentNumberOfTasks;
\r
1174 prvDeleteTCB( pxTCB );
\r
1176 /* Reset the next expected unblock time in case it referred to
\r
1177 the task that has just been deleted. */
\r
1178 prvResetNextTaskUnblockTime();
\r
1181 traceTASK_DELETE( pxTCB );
\r
1183 taskEXIT_CRITICAL();
\r
1185 /* Force a reschedule if it is the currently running task that has just
\r
1187 if( xSchedulerRunning != pdFALSE )
\r
1189 if( pxTCB == pxCurrentTCB )
\r
1191 configASSERT( uxSchedulerSuspended == 0 );
\r
1192 portYIELD_WITHIN_API();
\r
1196 mtCOVERAGE_TEST_MARKER();
\r
1201 #endif /* INCLUDE_vTaskDelete */
\r
1202 /*-----------------------------------------------------------*/
\r
/* vTaskDelayUntil(): blocks the calling task until an absolute wake time
(*pxPreviousWakeTime + xTimeIncrement), giving a fixed execution period that
does not drift.  The overflow logic handles tick-counter wrap-around;
*pxPreviousWakeTime is advanced whether or not the task actually delays.
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
1204 #if ( INCLUDE_vTaskDelayUntil == 1 )
\r
1206 void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement )
\r
1208 TickType_t xTimeToWake;
\r
1209 BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
\r
1211 configASSERT( pxPreviousWakeTime );
\r
1212 configASSERT( ( xTimeIncrement > 0U ) );
\r
/* Cannot block while the scheduler is suspended. */
1213 configASSERT( uxSchedulerSuspended == 0 );
\r
1215 vTaskSuspendAll();
\r
1217 /* Minor optimisation. The tick count cannot change in this
\r
1219 const TickType_t xConstTickCount = xTickCount;
\r
1221 /* Generate the tick time at which the task wants to wake. */
\r
1222 xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
\r
1224 if( xConstTickCount < *pxPreviousWakeTime )
\r
1226 /* The tick count has overflowed since this function was
\r
1227 lasted called. In this case the only time we should ever
\r
1228 actually delay is if the wake time has also overflowed,
\r
1229 and the wake time is greater than the tick time. When this
\r
1230 is the case it is as if neither time had overflowed. */
\r
1231 if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
\r
1233 xShouldDelay = pdTRUE;
\r
1237 mtCOVERAGE_TEST_MARKER();
\r
1242 /* The tick time has not overflowed. In this case we will
\r
1243 delay if either the wake time has overflowed, and/or the
\r
1244 tick time is less than the wake time. */
\r
1245 if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
\r
1247 xShouldDelay = pdTRUE;
\r
1251 mtCOVERAGE_TEST_MARKER();
\r
1255 /* Update the wake time ready for the next call. */
\r
1256 *pxPreviousWakeTime = xTimeToWake;
\r
1258 if( xShouldDelay != pdFALSE )
\r
1260 traceTASK_DELAY_UNTIL( xTimeToWake );
\r
1262 /* prvAddCurrentTaskToDelayedList() needs the block time, not
\r
1263 the time to wake, so subtract the current tick count. */
\r
1264 prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
\r
1268 mtCOVERAGE_TEST_MARKER();
\r
1271 xAlreadyYielded = xTaskResumeAll();
\r
1273 /* Force a reschedule if xTaskResumeAll has not already done so, we may
\r
1274 have put ourselves to sleep. */
\r
1275 if( xAlreadyYielded == pdFALSE )
\r
1277 portYIELD_WITHIN_API();
\r
1281 mtCOVERAGE_TEST_MARKER();
\r
1285 #endif /* INCLUDE_vTaskDelayUntil */
\r
1286 /*-----------------------------------------------------------*/
\r
/* vTaskDelay(): blocks the calling task for a relative number of ticks.
A zero delay only forces a reschedule; a non-zero delay moves the task to
the delayed list under a suspended scheduler, then yields if xTaskResumeAll()
did not already do so.
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
1288 #if ( INCLUDE_vTaskDelay == 1 )
\r
1290 void vTaskDelay( const TickType_t xTicksToDelay )
\r
1292 BaseType_t xAlreadyYielded = pdFALSE;
\r
1294 /* A delay time of zero just forces a reschedule. */
\r
1295 if( xTicksToDelay > ( TickType_t ) 0U )
\r
1297 configASSERT( uxSchedulerSuspended == 0 );
\r
1298 vTaskSuspendAll();
\r
1300 traceTASK_DELAY();
\r
1302 /* A task that is removed from the event list while the
\r
1303 scheduler is suspended will not get placed in the ready
\r
1304 list or removed from the blocked list until the scheduler
\r
1307 This task cannot be in an event list as it is the currently
\r
1308 executing task. */
\r
1309 prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
\r
1311 xAlreadyYielded = xTaskResumeAll();
\r
1315 mtCOVERAGE_TEST_MARKER();
\r
1318 /* Force a reschedule if xTaskResumeAll has not already done so, we may
\r
1319 have put ourselves to sleep. */
\r
1320 if( xAlreadyYielded == pdFALSE )
\r
1322 portYIELD_WITHIN_API();
\r
1326 mtCOVERAGE_TEST_MARKER();
\r
1330 #endif /* INCLUDE_vTaskDelay */
\r
1331 /*-----------------------------------------------------------*/
\r
/* eTaskGetState(): classifies a task as eRunning / eBlocked / eSuspended /
eDeleted / eReady by inspecting which kernel list its state list item is in.
A task on the suspended list is only genuinely eSuspended when its event
list item is in no list (otherwise it is blocked with an infinite timeout).
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
1333 #if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) )
\r
1335 eTaskState eTaskGetState( TaskHandle_t xTask )
\r
1337 eTaskState eReturn;
\r
1338 List_t *pxStateList;
\r
1339 const TCB_t * const pxTCB = ( TCB_t * ) xTask;
\r
1341 configASSERT( pxTCB );
\r
1343 if( pxTCB == pxCurrentTCB )
\r
1345 /* The task calling this function is querying its own state. */
\r
1346 eReturn = eRunning;
\r
1350 taskENTER_CRITICAL();
\r
1352 pxStateList = ( List_t * ) listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
\r
1354 taskEXIT_CRITICAL();
\r
1356 if( ( pxStateList == pxDelayedTaskList ) || ( pxStateList == pxOverflowDelayedTaskList ) )
\r
1358 /* The task being queried is referenced from one of the Blocked
\r
1360 eReturn = eBlocked;
\r
1363 #if ( INCLUDE_vTaskSuspend == 1 )
\r
1364 else if( pxStateList == &xSuspendedTaskList )
\r
1366 /* The task being queried is referenced from the suspended
\r
1367 list. Is it genuinely suspended or is it block
\r
1369 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
\r
1371 eReturn = eSuspended;
\r
1375 eReturn = eBlocked;
\r
1380 #if ( INCLUDE_vTaskDelete == 1 )
\r
1381 else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
\r
1383 /* The task being queried is referenced from the deleted
\r
1384 tasks list, or it is not referenced from any lists at
\r
1386 eReturn = eDeleted;
\r
1390 else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
\r
1392 /* If the task is not in any other state, it must be in the
\r
1393 Ready (including pending ready) state. */
\r
1399 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
\r
1401 #endif /* INCLUDE_eTaskGetState */
\r
1402 /*-----------------------------------------------------------*/
\r
/* uxTaskPriorityGet(): returns the (possibly inherited) priority of the
given task, or of the calling task when xTask is NULL.  The read is done
inside a critical section so the TCB cannot change underneath it.
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
1404 #if ( INCLUDE_uxTaskPriorityGet == 1 )
\r
1406 UBaseType_t uxTaskPriorityGet( TaskHandle_t xTask )
\r
1409 UBaseType_t uxReturn;
\r
1411 taskENTER_CRITICAL();
\r
1413 /* If null is passed in here then it is the priority of the that
\r
1414 called uxTaskPriorityGet() that is being queried. */
\r
1415 pxTCB = prvGetTCBFromHandle( xTask );
\r
1416 uxReturn = pxTCB->uxPriority;
\r
1418 taskEXIT_CRITICAL();
\r
1423 #endif /* INCLUDE_uxTaskPriorityGet */
\r
1424 /*-----------------------------------------------------------*/
\r
/* uxTaskPriorityGetFromISR(): interrupt-safe variant of uxTaskPriorityGet().
Uses an interrupt mask (not a critical section) so it may be called from an
ISR whose priority is at or below the maximum system call priority.
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
1426 #if ( INCLUDE_uxTaskPriorityGet == 1 )
\r
1428 UBaseType_t uxTaskPriorityGetFromISR( TaskHandle_t xTask )
\r
1431 UBaseType_t uxReturn, uxSavedInterruptState;
\r
1433 /* RTOS ports that support interrupt nesting have the concept of a
\r
1434 maximum system call (or maximum API call) interrupt priority.
\r
1435 Interrupts that are above the maximum system call priority are keep
\r
1436 permanently enabled, even when the RTOS kernel is in a critical section,
\r
1437 but cannot make any calls to FreeRTOS API functions. If configASSERT()
\r
1438 is defined in FreeRTOSConfig.h then
\r
1439 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1440 failure if a FreeRTOS API function is called from an interrupt that has
\r
1441 been assigned a priority above the configured maximum system call
\r
1442 priority. Only FreeRTOS functions that end in FromISR can be called
\r
1443 from interrupts that have been assigned a priority at or (logically)
\r
1444 below the maximum system call interrupt priority. FreeRTOS maintains a
\r
1445 separate interrupt safe API to ensure interrupt entry is as fast and as
\r
1446 simple as possible. More information (albeit Cortex-M specific) is
\r
1447 provided on the following link:
\r
1448 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1449 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1451 uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1453 /* If null is passed in here then it is the priority of the calling
\r
1454 task that is being queried. */
\r
1455 pxTCB = prvGetTCBFromHandle( xTask );
\r
1456 uxReturn = pxTCB->uxPriority;
\r
1458 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState );
\r
1463 #endif /* INCLUDE_uxTaskPriorityGet */
\r
1464 /*-----------------------------------------------------------*/
\r
/* vTaskPrioritySet(): changes a task's priority (NULL = calling task).
Decides whether a yield is needed, updates uxPriority (respecting any
priority currently inherited through a mutex when configUSE_MUTEXES == 1),
fixes up the event list item value, and re-inserts the task into the correct
ready list if it was ready.
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
1466 #if ( INCLUDE_vTaskPrioritySet == 1 )
\r
1468 void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority )
\r
1471 UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
\r
1472 BaseType_t xYieldRequired = pdFALSE;
\r
1474 configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );
\r
1476 /* Ensure the new priority is valid. */
\r
1477 if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
\r
/* Clamp to the highest valid priority. */
1479 uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
\r
1483 mtCOVERAGE_TEST_MARKER();
\r
1486 taskENTER_CRITICAL();
\r
1488 /* If null is passed in here then it is the priority of the calling
\r
1489 task that is being changed. */
\r
1490 pxTCB = prvGetTCBFromHandle( xTask );
\r
1492 traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );
\r
1494 #if ( configUSE_MUTEXES == 1 )
\r
/* Compare against the base (un-inherited) priority when mutexes
are in use; the effective priority may be temporarily raised. */
1496 uxCurrentBasePriority = pxTCB->uxBasePriority;
\r
1500 uxCurrentBasePriority = pxTCB->uxPriority;
\r
1504 if( uxCurrentBasePriority != uxNewPriority )
\r
1506 /* The priority change may have readied a task of higher
\r
1507 priority than the calling task. */
\r
1508 if( uxNewPriority > uxCurrentBasePriority )
\r
1510 if( pxTCB != pxCurrentTCB )
\r
1512 /* The priority of a task other than the currently
\r
1513 running task is being raised. Is the priority being
\r
1514 raised above that of the running task? */
\r
1515 if( uxNewPriority >= pxCurrentTCB->uxPriority )
\r
1517 xYieldRequired = pdTRUE;
\r
1521 mtCOVERAGE_TEST_MARKER();
\r
1526 /* The priority of the running task is being raised,
\r
1527 but the running task must already be the highest
\r
1528 priority task able to run so no yield is required. */
\r
1531 else if( pxTCB == pxCurrentTCB )
\r
1533 /* Setting the priority of the running task down means
\r
1534 there may now be another task of higher priority that
\r
1535 is ready to execute. */
\r
1536 xYieldRequired = pdTRUE;
\r
1540 /* Setting the priority of any other task down does not
\r
1541 require a yield as the running task must be above the
\r
1542 new priority of the task being modified. */
\r
1545 /* Remember the ready list the task might be referenced from
\r
1546 before its uxPriority member is changed so the
\r
1547 taskRESET_READY_PRIORITY() macro can function correctly. */
\r
1548 uxPriorityUsedOnEntry = pxTCB->uxPriority;
\r
1550 #if ( configUSE_MUTEXES == 1 )
\r
1552 /* Only change the priority being used if the task is not
\r
1553 currently using an inherited priority. */
\r
1554 if( pxTCB->uxBasePriority == pxTCB->uxPriority )
\r
1556 pxTCB->uxPriority = uxNewPriority;
\r
1560 mtCOVERAGE_TEST_MARKER();
\r
1563 /* The base priority gets set whatever. */
\r
1564 pxTCB->uxBasePriority = uxNewPriority;
\r
1568 pxTCB->uxPriority = uxNewPriority;
\r
1572 /* Only reset the event list item value if the value is not
\r
1573 being used for anything else. */
\r
1574 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
\r
1576 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
1580 mtCOVERAGE_TEST_MARKER();
\r
1583 /* If the task is in the blocked or suspended list we need do
\r
1584 nothing more than change its priority variable. However, if
\r
1585 the task is in a ready list it needs to be removed and placed
\r
1586 in the list appropriate to its new priority. */
\r
1587 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
\r
1589 /* The task is currently in its ready list - remove before
\r
1590 adding it to it's new ready list. As we are in a critical
\r
1591 section we can do this even if the scheduler is suspended. */
\r
1592 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
1594 /* It is known that the task is in its ready list so
\r
1595 there is no need to check again and the port level
\r
1596 reset macro can be called directly. */
\r
1597 portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
\r
1601 mtCOVERAGE_TEST_MARKER();
\r
1603 prvAddTaskToReadyList( pxTCB );
\r
1607 mtCOVERAGE_TEST_MARKER();
\r
1610 if( xYieldRequired != pdFALSE )
\r
1612 taskYIELD_IF_USING_PREEMPTION();
\r
1616 mtCOVERAGE_TEST_MARKER();
\r
1619 /* Remove compiler warning about unused variables when the port
\r
1620 optimised task selection is not being used. */
\r
1621 ( void ) uxPriorityUsedOnEntry;
\r
1624 taskEXIT_CRITICAL();
\r
1627 #endif /* INCLUDE_vTaskPrioritySet */
\r
1628 /*-----------------------------------------------------------*/
\r
/* vTaskSuspend(): moves a task (NULL = calling task) from whatever list it
is on to xSuspendedTaskList, also removing it from any event list and
cancelling a pending task notification wait.  If the calling task suspended
itself, a yield (or, pre-scheduler, a pxCurrentTCB fix-up) follows.
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
1630 #if ( INCLUDE_vTaskSuspend == 1 )
\r
1632 void vTaskSuspend( TaskHandle_t xTaskToSuspend )
\r
1636 taskENTER_CRITICAL();
\r
1638 /* If null is passed in here then it is the running task that is
\r
1639 being suspended. */
\r
1640 pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
\r
1642 traceTASK_SUSPEND( pxTCB );
\r
1644 /* Remove task from the ready/delayed list and place in the
\r
1645 suspended list. */
\r
1646 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
/* That was the last ready task at this priority level. */
1648 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
\r
1652 mtCOVERAGE_TEST_MARKER();
\r
1655 /* Is the task waiting on an event also? */
\r
1656 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
\r
1658 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
1662 mtCOVERAGE_TEST_MARKER();
\r
1665 vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
\r
1667 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
1669 if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
\r
1671 /* The task was blocked to wait for a notification, but is
\r
1672 now suspended, so no notification was received. */
\r
1673 pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
\r
1678 taskEXIT_CRITICAL();
\r
1680 if( xSchedulerRunning != pdFALSE )
\r
1682 /* Reset the next expected unblock time in case it referred to the
\r
1683 task that is now in the Suspended state. */
\r
1684 taskENTER_CRITICAL();
\r
1686 prvResetNextTaskUnblockTime();
\r
1688 taskEXIT_CRITICAL();
\r
1692 mtCOVERAGE_TEST_MARKER();
\r
1695 if( pxTCB == pxCurrentTCB )
\r
1697 if( xSchedulerRunning != pdFALSE )
\r
1699 /* The current task has just been suspended. */
\r
1700 configASSERT( uxSchedulerSuspended == 0 );
\r
1701 portYIELD_WITHIN_API();
\r
1705 /* The scheduler is not running, but the task that was pointed
\r
1706 to by pxCurrentTCB has just been suspended and pxCurrentTCB
\r
1707 must be adjusted to point to a different task. */
\r
1708 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks )
\r
1710 /* No other tasks are ready, so set pxCurrentTCB back to
\r
1711 NULL so when the next task is created pxCurrentTCB will
\r
1712 be set to point to it no matter what its relative priority
\r
1714 pxCurrentTCB = NULL;
\r
1718 vTaskSwitchContext();
\r
1724 mtCOVERAGE_TEST_MARKER();
\r
1728 #endif /* INCLUDE_vTaskSuspend */
\r
1729 /*-----------------------------------------------------------*/
\r
/* prvTaskIsTaskSuspended(): returns pdTRUE only when the task is genuinely
in the Suspended state — on xSuspendedTaskList, not already readied via the
pending-ready list, and with its event list item in no list (i.e. not merely
blocked with an infinite timeout).  Must be called from a critical section.
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
1731 #if ( INCLUDE_vTaskSuspend == 1 )
\r
1733 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
\r
1735 BaseType_t xReturn = pdFALSE;
\r
1736 const TCB_t * const pxTCB = ( TCB_t * ) xTask;
\r
1738 /* Accesses xPendingReadyList so must be called from a critical
\r
1741 /* It does not make sense to check if the calling task is suspended. */
\r
1742 configASSERT( xTask );
\r
1744 /* Is the task being resumed actually in the suspended list? */
\r
1745 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
\r
1747 /* Has the task already been resumed from within an ISR? */
\r
1748 if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
\r
1750 /* Is it in the suspended list because it is in the Suspended
\r
1751 state, or because is is blocked with no timeout? */
\r
1752 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */
\r
1758 mtCOVERAGE_TEST_MARKER();
\r
1763 mtCOVERAGE_TEST_MARKER();
\r
1768 mtCOVERAGE_TEST_MARKER();
\r
1772 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
\r
1774 #endif /* INCLUDE_vTaskSuspend */
\r
1775 /*-----------------------------------------------------------*/
\r
/* vTaskResume(): moves a genuinely-suspended task back to the ready list and
yields if the resumed task has priority >= the running task.  Resuming the
calling task, or passing NULL, is a no-op.
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
1777 #if ( INCLUDE_vTaskSuspend == 1 )
\r
1779 void vTaskResume( TaskHandle_t xTaskToResume )
\r
1781 TCB_t * const pxTCB = ( TCB_t * ) xTaskToResume;
\r
1783 /* It does not make sense to resume the calling task. */
\r
1784 configASSERT( xTaskToResume );
\r
1786 /* The parameter cannot be NULL as it is impossible to resume the
\r
1787 currently executing task. */
\r
1788 if( ( pxTCB != NULL ) && ( pxTCB != pxCurrentTCB ) )
\r
1790 taskENTER_CRITICAL();
\r
1792 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
\r
1794 traceTASK_RESUME( pxTCB );
\r
1796 /* The ready list can be accessed even if the scheduler is
\r
1797 suspended because this is inside a critical section. */
\r
1798 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
1799 prvAddTaskToReadyList( pxTCB );
\r
1801 /* A higher priority task may have just been resumed. */
\r
1802 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
\r
1804 /* This yield may not cause the task just resumed to run,
\r
1805 but will leave the lists in the correct state for the
\r
1807 taskYIELD_IF_USING_PREEMPTION();
\r
1811 mtCOVERAGE_TEST_MARKER();
\r
1816 mtCOVERAGE_TEST_MARKER();
\r
1819 taskEXIT_CRITICAL();
\r
1823 mtCOVERAGE_TEST_MARKER();
\r
1827 #endif /* INCLUDE_vTaskSuspend */
\r
1829 /*-----------------------------------------------------------*/
\r
/* xTaskResumeFromISR(): interrupt-safe resume.  If the scheduler is running
the task goes straight onto the ready list; if the scheduler is suspended it
is parked on xPendingReadyList to be readied later.  Returns pdTRUE when the
caller should request a context switch (resumed task priority >= current).
NOTE(review): this extract has lost brace/blank lines (original line numbers
are non-contiguous); code left byte-identical to the extract. */
1831 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
\r
1833 BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
\r
1835 BaseType_t xYieldRequired = pdFALSE;
\r
1836 TCB_t * const pxTCB = ( TCB_t * ) xTaskToResume;
\r
1837 UBaseType_t uxSavedInterruptStatus;
\r
1839 configASSERT( xTaskToResume );
\r
1841 /* RTOS ports that support interrupt nesting have the concept of a
\r
1842 maximum system call (or maximum API call) interrupt priority.
\r
1843 Interrupts that are above the maximum system call priority are keep
\r
1844 permanently enabled, even when the RTOS kernel is in a critical section,
\r
1845 but cannot make any calls to FreeRTOS API functions. If configASSERT()
\r
1846 is defined in FreeRTOSConfig.h then
\r
1847 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1848 failure if a FreeRTOS API function is called from an interrupt that has
\r
1849 been assigned a priority above the configured maximum system call
\r
1850 priority. Only FreeRTOS functions that end in FromISR can be called
\r
1851 from interrupts that have been assigned a priority at or (logically)
\r
1852 below the maximum system call interrupt priority. FreeRTOS maintains a
\r
1853 separate interrupt safe API to ensure interrupt entry is as fast and as
\r
1854 simple as possible. More information (albeit Cortex-M specific) is
\r
1855 provided on the following link:
\r
1856 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1857 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1859 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1861 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
\r
1863 traceTASK_RESUME_FROM_ISR( pxTCB );
\r
1865 /* Check the ready lists can be accessed. */
\r
1866 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
1868 /* Ready lists can be accessed so move the task from the
\r
1869 suspended list to the ready list directly. */
\r
1870 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
\r
1872 xYieldRequired = pdTRUE;
\r
1876 mtCOVERAGE_TEST_MARKER();
\r
1879 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
1880 prvAddTaskToReadyList( pxTCB );
\r
1884 /* The delayed or ready lists cannot be accessed so the task
\r
1885 is held in the pending ready list until the scheduler is
\r
1887 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
\r
1892 mtCOVERAGE_TEST_MARKER();
\r
1895 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1897 return xYieldRequired;
\r
1900 #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
\r
1901 /*-----------------------------------------------------------*/
\r
1903 void vTaskStartScheduler( void )
\r
1905 BaseType_t xReturn;
\r
1907 /* Add the idle task at the lowest priority. */
\r
1908 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
\r
1910 StaticTask_t *pxIdleTaskTCBBuffer = NULL;
\r
1911 StackType_t *pxIdleTaskStackBuffer = NULL;
\r
1912 uint32_t ulIdleTaskStackSize;
\r
1914 /* The Idle task is created using user provided RAM - obtain the
\r
1915 address of the RAM then create the idle task. */
\r
1916 vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
\r
1917 xIdleTaskHandle = xTaskCreateStatic( prvIdleTask,
\r
1918 configIDLE_TASK_NAME,
\r
1919 ulIdleTaskStackSize,
\r
1920 ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */
\r
1921 ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ),
\r
1922 pxIdleTaskStackBuffer,
\r
1923 pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
\r
1925 if( xIdleTaskHandle != NULL )
\r
1936 /* The Idle task is being created using dynamically allocated RAM. */
\r
1937 xReturn = xTaskCreate( prvIdleTask,
\r
1938 configIDLE_TASK_NAME,
\r
1939 configMINIMAL_STACK_SIZE,
\r
1941 ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ),
\r
1942 &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
\r
1944 #endif /* configSUPPORT_STATIC_ALLOCATION */
\r
1946 #if ( configUSE_TIMERS == 1 )
\r
1948 if( xReturn == pdPASS )
\r
1950 xReturn = xTimerCreateTimerTask();
\r
1954 mtCOVERAGE_TEST_MARKER();
\r
1957 #endif /* configUSE_TIMERS */
\r
1959 if( xReturn == pdPASS )
\r
1961 /* freertos_tasks_c_additions_init() should only be called if the user
\r
1962 definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
\r
1963 the only macro called by the function. */
\r
1964 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
\r
1966 freertos_tasks_c_additions_init();
\r
1970 /* Interrupts are turned off here, to ensure a tick does not occur
\r
1971 before or during the call to xPortStartScheduler(). The stacks of
\r
1972 the created tasks contain a status word with interrupts switched on
\r
1973 so interrupts will automatically get re-enabled when the first task
\r
1975 portDISABLE_INTERRUPTS();
\r
1977 #if ( configUSE_NEWLIB_REENTRANT == 1 )
\r
1979 /* Switch Newlib's _impure_ptr variable to point to the _reent
\r
1980 structure specific to the task that will run first. */
\r
1981 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
\r
1983 #endif /* configUSE_NEWLIB_REENTRANT */
\r
1985 xNextTaskUnblockTime = portMAX_DELAY;
\r
1986 xSchedulerRunning = pdTRUE;
\r
1987 xTickCount = ( TickType_t ) 0U;
\r
1989 /* If configGENERATE_RUN_TIME_STATS is defined then the following
\r
1990 macro must be defined to configure the timer/counter used to generate
\r
1991 the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS
\r
1992 is set to 0 and the following line fails to build then ensure you do not
\r
1993 have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
\r
1994 FreeRTOSConfig.h file. */
\r
1995 portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
\r
1997 /* Setting up the timer tick is hardware specific and thus in the
\r
1998 portable interface. */
\r
1999 if( xPortStartScheduler() != pdFALSE )
\r
2001 /* Should not reach here as if the scheduler is running the
\r
2002 function will not return. */
\r
2006 /* Should only reach here if a task calls xTaskEndScheduler(). */
\r
2011 /* This line will only be reached if the kernel could not be started,
\r
2012 because there was not enough FreeRTOS heap to create the idle task
\r
2013 or the timer task. */
\r
2014 configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
\r
2017 /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
\r
2018 meaning xIdleTaskHandle is not used anywhere else. */
\r
2019 ( void ) xIdleTaskHandle;
\r
2021 /*-----------------------------------------------------------*/
\r
2023 void vTaskEndScheduler( void )
\r
2025 /* Stop the scheduler interrupts and call the portable scheduler end
\r
2026 routine so the original ISRs can be restored if necessary. The port
\r
2027 layer must ensure interrupts enable bit is left in the correct state. */
\r
2028 portDISABLE_INTERRUPTS();
\r
2029 xSchedulerRunning = pdFALSE;
\r
2030 vPortEndScheduler();
\r
2032 /*----------------------------------------------------------*/
\r
2034 void vTaskSuspendAll( void )
\r
2036 /* A critical section is not required as the variable is of type
\r
2037 BaseType_t. Please read Richard Barry's reply in the following link to a
\r
2038 post in the FreeRTOS support forum before reporting this as a bug! -
\r
2039 http://goo.gl/wu4acr */
\r
2040 ++uxSchedulerSuspended;
\r
2042 /*----------------------------------------------------------*/
\r
#if ( configUSE_TICKLESS_IDLE != 0 )

	/* Return the number of ticks for which the system can safely sleep, or
	zero if any task could run before the next scheduled unblock time. */
	static TickType_t prvGetExpectedIdleTime( void )
	{
	TickType_t xReturn;
	UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;

		/* uxHigherPriorityReadyTasks takes care of the case where
		configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
		task that are in the Ready state, even though the idle task is
		running. */
		#if( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
		{
			if( uxTopReadyPriority > tskIDLE_PRIORITY )
			{
				uxHigherPriorityReadyTasks = pdTRUE;
			}
		}
		#else
		{
			const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;

			/* When port optimised task selection is used the uxTopReadyPriority
			variable is used as a bit map.  If bits other than the least
			significant bit are set then there are tasks that have a priority
			above the idle priority that are in the Ready state.  This takes
			care of the case where the co-operative scheduler is in use. */
			if( uxTopReadyPriority > uxLeastSignificantBit )
			{
				uxHigherPriorityReadyTasks = pdTRUE;
			}
		}
		#endif

		if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY )
		{
			xReturn = 0;
		}
		else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 )
		{
			/* There are other idle priority tasks in the ready state.  If
			time slicing is used then the very next tick interrupt must be
			processed. */
			xReturn = 0;
		}
		else if( uxHigherPriorityReadyTasks != pdFALSE )
		{
			/* There are tasks in the Ready state that have a priority above the
			idle priority.  This path can only be reached if
			configUSE_PREEMPTION is 0. */
			xReturn = 0;
		}
		else
		{
			xReturn = xNextTaskUnblockTime - xTickCount;
		}

		return xReturn;
	}

#endif /* configUSE_TICKLESS_IDLE */
\r
2105 /*----------------------------------------------------------*/
\r
2107 BaseType_t xTaskResumeAll( void )
\r
2109 TCB_t *pxTCB = NULL;
\r
2110 BaseType_t xAlreadyYielded = pdFALSE;
\r
2112 /* If uxSchedulerSuspended is zero then this function does not match a
\r
2113 previous call to vTaskSuspendAll(). */
\r
2114 configASSERT( uxSchedulerSuspended );
\r
2116 /* It is possible that an ISR caused a task to be removed from an event
\r
2117 list while the scheduler was suspended. If this was the case then the
\r
2118 removed task will have been added to the xPendingReadyList. Once the
\r
2119 scheduler has been resumed it is safe to move all the pending ready
\r
2120 tasks from this list into their appropriate ready list. */
\r
2121 taskENTER_CRITICAL();
\r
2123 --uxSchedulerSuspended;
\r
2125 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
2127 if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
\r
2129 /* Move any readied tasks from the pending list into the
\r
2130 appropriate ready list. */
\r
2131 while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
\r
2133 pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) );
\r
2134 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
2135 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
2136 prvAddTaskToReadyList( pxTCB );
\r
2138 /* If the moved task has a priority higher than the current
\r
2139 task then a yield must be performed. */
\r
2140 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
\r
2142 xYieldPending = pdTRUE;
\r
2146 mtCOVERAGE_TEST_MARKER();
\r
2150 if( pxTCB != NULL )
\r
2152 /* A task was unblocked while the scheduler was suspended,
\r
2153 which may have prevented the next unblock time from being
\r
2154 re-calculated, in which case re-calculate it now. Mainly
\r
2155 important for low power tickless implementations, where
\r
2156 this can prevent an unnecessary exit from low power
\r
2158 prvResetNextTaskUnblockTime();
\r
2161 /* If any ticks occurred while the scheduler was suspended then
\r
2162 they should be processed now. This ensures the tick count does
\r
2163 not slip, and that any delayed tasks are resumed at the correct
\r
2166 UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. */
\r
2168 if( uxPendedCounts > ( UBaseType_t ) 0U )
\r
2172 if( xTaskIncrementTick() != pdFALSE )
\r
2174 xYieldPending = pdTRUE;
\r
2178 mtCOVERAGE_TEST_MARKER();
\r
2181 } while( uxPendedCounts > ( UBaseType_t ) 0U );
\r
2183 uxPendedTicks = 0;
\r
2187 mtCOVERAGE_TEST_MARKER();
\r
2191 if( xYieldPending != pdFALSE )
\r
2193 #if( configUSE_PREEMPTION != 0 )
\r
2195 xAlreadyYielded = pdTRUE;
\r
2198 taskYIELD_IF_USING_PREEMPTION();
\r
2202 mtCOVERAGE_TEST_MARKER();
\r
2208 mtCOVERAGE_TEST_MARKER();
\r
2211 taskEXIT_CRITICAL();
\r
2213 return xAlreadyYielded;
\r
2215 /*-----------------------------------------------------------*/
\r
2217 TickType_t xTaskGetTickCount( void )
\r
2219 TickType_t xTicks;
\r
2221 /* Critical section required if running on a 16 bit processor. */
\r
2222 portTICK_TYPE_ENTER_CRITICAL();
\r
2224 xTicks = xTickCount;
\r
2226 portTICK_TYPE_EXIT_CRITICAL();
\r
2230 /*-----------------------------------------------------------*/
\r
2232 TickType_t xTaskGetTickCountFromISR( void )
\r
2234 TickType_t xReturn;
\r
2235 UBaseType_t uxSavedInterruptStatus;
\r
2237 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
2238 system call (or maximum API call) interrupt priority. Interrupts that are
\r
2239 above the maximum system call priority are kept permanently enabled, even
\r
2240 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
2241 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
2242 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
2243 failure if a FreeRTOS API function is called from an interrupt that has been
\r
2244 assigned a priority above the configured maximum system call priority.
\r
2245 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
2246 that have been assigned a priority at or (logically) below the maximum
\r
2247 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
2248 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
2249 More information (albeit Cortex-M specific) is provided on the following
\r
2250 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
2251 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
2253 uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
\r
2255 xReturn = xTickCount;
\r
2257 portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
2261 /*-----------------------------------------------------------*/
\r
2263 UBaseType_t uxTaskGetNumberOfTasks( void )
\r
2265 /* A critical section is not required because the variables are of type
\r
2267 return uxCurrentNumberOfTasks;
\r
2269 /*-----------------------------------------------------------*/
\r
2271 char *pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
2275 /* If null is passed in here then the name of the calling task is being
\r
2277 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
\r
2278 configASSERT( pxTCB );
\r
2279 return &( pxTCB->pcTaskName[ 0 ] );
\r
2281 /*-----------------------------------------------------------*/
\r
#if ( INCLUDE_xTaskGetHandle == 1 )

	/* Scan one task list for a task whose name exactly matches
	pcNameToQuery.  Returns the matching TCB, or NULL.  Must be called with
	the scheduler suspended so the list cannot change underneath it. */
	static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] )
	{
	TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL;
	UBaseType_t x;
	char cNextChar;

		/* This function is called with the scheduler suspended. */

		if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
		{
			listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );

			do
			{
				listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );

				/* Check each character in the name looking for a match or
				mismatch. */
				for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
				{
					cNextChar = pxNextTCB->pcTaskName[ x ];

					if( cNextChar != pcNameToQuery[ x ] )
					{
						/* Characters didn't match. */
						break;
					}
					else if( cNextChar == 0x00 )
					{
						/* Both strings terminated, a match must have been
						found. */
						pxReturn = pxNextTCB;
						break;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}

				if( pxReturn != NULL )
				{
					/* The handle has been found. */
					break;
				}

			} while( pxNextTCB != pxFirstTCB );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return pxReturn;
	}

#endif /* INCLUDE_xTaskGetHandle */
\r
2342 /*-----------------------------------------------------------*/
\r
#if ( INCLUDE_xTaskGetHandle == 1 )

	/* Look up a task handle by name, searching the ready, delayed and
	(when configured) suspended and deleted lists.  Returns NULL if no task
	with that name exists. */
	TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t uxQueue = configMAX_PRIORITIES;
	TCB_t* pxTCB;

		/* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
		configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );

		vTaskSuspendAll();
		{
			/* Search the ready lists. */
			do
			{
				uxQueue--;
				pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );

				if( pxTCB != NULL )
				{
					/* Found the handle. */
					break;
				}

			} while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

			/* Search the delayed lists. */
			if( pxTCB == NULL )
			{
				pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
			}

			if( pxTCB == NULL )
			{
				pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
			}

			#if ( INCLUDE_vTaskSuspend == 1 )
			{
				if( pxTCB == NULL )
				{
					/* Search the suspended list. */
					pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
				}
			}
			#endif

			#if( INCLUDE_vTaskDelete == 1 )
			{
				if( pxTCB == NULL )
				{
					/* Search the deleted list. */
					pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
				}
			}
			#endif
		}
		( void ) xTaskResumeAll();

		return ( TaskHandle_t ) pxTCB;
	}

#endif /* INCLUDE_xTaskGetHandle */
\r
2407 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Populate pxTaskStatusArray with a TaskStatus_t entry for every task in
	the system.  Returns the number of entries written, or 0 if uxArraySize
	is too small to hold them all.  Optionally reports the total run time. */
	UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime )
	{
	UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;

		vTaskSuspendAll();
		{
			/* Is there a space in the array for each task in the system? */
			if( uxArraySize >= uxCurrentNumberOfTasks )
			{
				/* Fill in an TaskStatus_t structure with information on each
				task in the Ready state. */
				do
				{
					uxQueue--;
					uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );

				} while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

				/* Fill in an TaskStatus_t structure with information on each
				task in the Blocked state. */
				uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
				uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );

				#if( INCLUDE_vTaskDelete == 1 )
				{
					/* Fill in an TaskStatus_t structure with information on
					each task that has been deleted but not yet cleaned up. */
					uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
				}
				#endif

				#if ( INCLUDE_vTaskSuspend == 1 )
				{
					/* Fill in an TaskStatus_t structure with information on
					each task in the Suspended state. */
					uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
				}
				#endif

				#if ( configGENERATE_RUN_TIME_STATS == 1)
				{
					if( pulTotalRunTime != NULL )
					{
						#ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
							portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
						#else
							*pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
						#endif
					}
				}
				#else
				{
					if( pulTotalRunTime != NULL )
					{
						*pulTotalRunTime = 0;
					}
				}
				#endif
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		( void ) xTaskResumeAll();

		return uxTask;
	}

#endif /* configUSE_TRACE_FACILITY */
\r
2481 /*----------------------------------------------------------*/
\r
#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )

	/* Return the handle of the idle task.  Only valid after the scheduler
	has been started (which is when the idle task is created). */
	TaskHandle_t xTaskGetIdleTaskHandle( void )
	{
		/* If xTaskGetIdleTaskHandle() is called before the scheduler has been
		started, then xIdleTaskHandle will be NULL. */
		configASSERT( ( xIdleTaskHandle != NULL ) );
		return xIdleTaskHandle;
	}

#endif /* INCLUDE_xTaskGetIdleTaskHandle */
\r
2494 /*----------------------------------------------------------*/
\r
2496 /* This conditional compilation should use inequality to 0, not equality to 1.
\r
2497 This is to ensure vTaskStepTick() is available when user defined low power mode
\r
2498 implementations require configUSE_TICKLESS_IDLE to be set to a value other than
\r
#if ( configUSE_TICKLESS_IDLE != 0 )

	/* Advance the tick count by xTicksToJump ticks after a tickless idle
	period.  The jump must not pass the next task unblock time. */
	void vTaskStepTick( const TickType_t xTicksToJump )
	{
		/* Correct the tick count value after a period during which the tick
		was suppressed.  Note this does *not* call the tick hook function for
		each stepped tick. */
		configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
		xTickCount += xTicksToJump;
		traceINCREASE_TICK_COUNT( xTicksToJump );
	}

#endif /* configUSE_TICKLESS_IDLE */
\r
2513 /*----------------------------------------------------------*/
\r
#if ( INCLUDE_xTaskAbortDelay == 1 )

	/* Force a task out of the Blocked state before its timeout expires.
	Returns pdPASS if the task was in the Blocked state, otherwise pdFAIL. */
	BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
	{
	TCB_t *pxTCB = ( TCB_t * ) xTask;
	BaseType_t xReturn;

		configASSERT( pxTCB );

		vTaskSuspendAll();
		{
			/* A task can only be prematurely removed from the Blocked state if
			it is actually in the Blocked state. */
			if( eTaskGetState( xTask ) == eBlocked )
			{
				xReturn = pdPASS;

				/* Remove the reference to the task from the blocked list.  An
				interrupt won't touch the xStateListItem because the
				scheduler is suspended. */
				( void ) uxListRemove( &( pxTCB->xStateListItem ) );

				/* Is the task waiting on an event also?  If so remove it from
				the event list too.  Interrupts can touch the event list item,
				even though the scheduler is suspended, so a critical section
				is used. */
				taskENTER_CRITICAL();
				{
					if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
					{
						( void ) uxListRemove( &( pxTCB->xEventListItem ) );
						pxTCB->ucDelayAborted = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				taskEXIT_CRITICAL();

				/* Place the unblocked task into the appropriate ready list. */
				prvAddTaskToReadyList( pxTCB );

				/* A task being unblocked cannot cause an immediate context
				switch if preemption is turned off. */
				#if ( configUSE_PREEMPTION == 1 )
				{
					/* Preemption is on, but a context switch should only be
					performed if the unblocked task has a priority that is
					equal to or higher than the currently executing task. */
					if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
					{
						/* Pend the yield to be performed when the scheduler
						is unsuspended. */
						xYieldPending = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_PREEMPTION */
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		( void ) xTaskResumeAll();

		return xReturn;
	}

#endif /* INCLUDE_xTaskAbortDelay */
\r
2589 /*----------------------------------------------------------*/
\r
2591 BaseType_t xTaskIncrementTick( void )
\r
2594 TickType_t xItemValue;
\r
2595 BaseType_t xSwitchRequired = pdFALSE;
\r
2597 /* Called by the portable layer each time a tick interrupt occurs.
\r
2598 Increments the tick then checks to see if the new tick value will cause any
\r
2599 tasks to be unblocked. */
\r
2600 traceTASK_INCREMENT_TICK( xTickCount );
\r
2601 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
2603 /* Minor optimisation. The tick count cannot change in this
\r
2605 const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
\r
2607 /* Increment the RTOS tick, switching the delayed and overflowed
\r
2608 delayed lists if it wraps to 0. */
\r
2609 xTickCount = xConstTickCount;
\r
2611 if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */
\r
2613 taskSWITCH_DELAYED_LISTS();
\r
2617 mtCOVERAGE_TEST_MARKER();
\r
2620 /* See if this tick has made a timeout expire. Tasks are stored in
\r
2621 the queue in the order of their wake time - meaning once one task
\r
2622 has been found whose block time has not expired there is no need to
\r
2623 look any further down the list. */
\r
2624 if( xConstTickCount >= xNextTaskUnblockTime )
\r
2628 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
\r
2630 /* The delayed list is empty. Set xNextTaskUnblockTime
\r
2631 to the maximum possible value so it is extremely
\r
2633 if( xTickCount >= xNextTaskUnblockTime ) test will pass
\r
2634 next time through. */
\r
2635 xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
2640 /* The delayed list is not empty, get the value of the
\r
2641 item at the head of the delayed list. This is the time
\r
2642 at which the task at the head of the delayed list must
\r
2643 be removed from the Blocked state. */
\r
2644 pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
\r
2645 xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
\r
2647 if( xConstTickCount < xItemValue )
\r
2649 /* It is not time to unblock this item yet, but the
\r
2650 item value is the time at which the task at the head
\r
2651 of the blocked list must be removed from the Blocked
\r
2652 state - so record the item value in
\r
2653 xNextTaskUnblockTime. */
\r
2654 xNextTaskUnblockTime = xItemValue;
\r
2659 mtCOVERAGE_TEST_MARKER();
\r
2662 /* It is time to remove the item from the Blocked state. */
\r
2663 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
2665 /* Is the task waiting on an event also? If so remove
\r
2666 it from the event list. */
\r
2667 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
\r
2669 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
2673 mtCOVERAGE_TEST_MARKER();
\r
2676 /* Place the unblocked task into the appropriate ready
\r
2678 prvAddTaskToReadyList( pxTCB );
\r
2680 /* A task being unblocked cannot cause an immediate
\r
2681 context switch if preemption is turned off. */
\r
2682 #if ( configUSE_PREEMPTION == 1 )
\r
2684 /* Preemption is on, but a context switch should
\r
2685 only be performed if the unblocked task has a
\r
2686 priority that is equal to or higher than the
\r
2687 currently executing task. */
\r
2688 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
\r
2690 xSwitchRequired = pdTRUE;
\r
2694 mtCOVERAGE_TEST_MARKER();
\r
2697 #endif /* configUSE_PREEMPTION */
\r
2702 /* Tasks of equal priority to the currently running task will share
\r
2703 processing time (time slice) if preemption is on, and the application
\r
2704 writer has not explicitly turned time slicing off. */
\r
2705 #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
\r
2707 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 )
\r
2709 xSwitchRequired = pdTRUE;
\r
2713 mtCOVERAGE_TEST_MARKER();
\r
2716 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
\r
2718 #if ( configUSE_TICK_HOOK == 1 )
\r
2720 /* Guard against the tick hook being called when the pended tick
\r
2721 count is being unwound (when the scheduler is being unlocked). */
\r
2722 if( uxPendedTicks == ( UBaseType_t ) 0U )
\r
2724 vApplicationTickHook();
\r
2728 mtCOVERAGE_TEST_MARKER();
\r
2731 #endif /* configUSE_TICK_HOOK */
\r
2737 /* The tick hook gets called at regular intervals, even if the
\r
2738 scheduler is locked. */
\r
2739 #if ( configUSE_TICK_HOOK == 1 )
\r
2741 vApplicationTickHook();
\r
2746 #if ( configUSE_PREEMPTION == 1 )
\r
2748 if( xYieldPending != pdFALSE )
\r
2750 xSwitchRequired = pdTRUE;
\r
2754 mtCOVERAGE_TEST_MARKER();
\r
2757 #endif /* configUSE_PREEMPTION */
\r
2759 return xSwitchRequired;
\r
2761 /*-----------------------------------------------------------*/
\r
#if ( configUSE_APPLICATION_TASK_TAG == 1 )

	/* Store an application supplied hook (tag) in the TCB of xTask, or of
	the calling task if xTask is NULL. */
	void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
	{
	TCB_t *xTCB;

		/* If xTask is NULL then it is the task hook of the calling task that is
		getting set. */
		if( xTask == NULL )
		{
			xTCB = ( TCB_t * ) pxCurrentTCB;
		}
		else
		{
			xTCB = ( TCB_t * ) xTask;
		}

		/* Save the hook function in the TCB.  A critical section is required as
		the value can be accessed from an interrupt. */
		taskENTER_CRITICAL();
			xTCB->pxTaskTag = pxHookFunction;
		taskEXIT_CRITICAL();
	}

#endif /* configUSE_APPLICATION_TASK_TAG */
\r
2788 /*-----------------------------------------------------------*/
\r
#if ( configUSE_APPLICATION_TASK_TAG == 1 )

	/* Return the hook (tag) stored in the TCB of xTask, or of the calling
	task if xTask is NULL. */
	TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
	{
	TCB_t *xTCB;
	TaskHookFunction_t xReturn;

		/* If xTask is NULL then we are setting our own task hook. */
		if( xTask == NULL )
		{
			xTCB = ( TCB_t * ) pxCurrentTCB;
		}
		else
		{
			xTCB = ( TCB_t * ) xTask;
		}

		/* Read the hook function from the TCB.  A critical section is required
		as the value can be accessed from an interrupt. */
		taskENTER_CRITICAL();
		{
			xReturn = xTCB->pxTaskTag;
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_APPLICATION_TASK_TAG */
\r
2819 /*-----------------------------------------------------------*/
\r
#if ( configUSE_APPLICATION_TASK_TAG == 1 )

	/* Invoke the hook (tag) stored in the TCB of xTask (or the calling task
	if xTask is NULL), passing it pvParameter.  Returns the hook's return
	value, or pdFAIL if no hook is set. */
	BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
	{
	TCB_t *xTCB;
	BaseType_t xReturn;

		/* If xTask is NULL then we are calling our own task hook. */
		if( xTask == NULL )
		{
			xTCB = ( TCB_t * ) pxCurrentTCB;
		}
		else
		{
			xTCB = ( TCB_t * ) xTask;
		}

		if( xTCB->pxTaskTag != NULL )
		{
			xReturn = xTCB->pxTaskTag( pvParameter );
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_APPLICATION_TASK_TAG */
\r
2851 /*-----------------------------------------------------------*/
\r
2853 void vTaskSwitchContext( void )
\r
2855 if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE )
\r
2857 /* The scheduler is currently suspended - do not allow a context
\r
2859 xYieldPending = pdTRUE;
\r
2863 xYieldPending = pdFALSE;
\r
2864 traceTASK_SWITCHED_OUT();
\r
2866 #if ( configGENERATE_RUN_TIME_STATS == 1 )
\r
2868 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
\r
2869 portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
\r
2871 ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
\r
2874 /* Add the amount of time the task has been running to the
\r
2875 accumulated time so far. The time the task started running was
\r
2876 stored in ulTaskSwitchedInTime. Note that there is no overflow
\r
2877 protection here so count values are only valid until the timer
\r
2878 overflows. The guard against negative values is to protect
\r
2879 against suspect run time stat counter implementations - which
\r
2880 are provided by the application, not the kernel. */
\r
2881 if( ulTotalRunTime > ulTaskSwitchedInTime )
\r
2883 pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime );
\r
2887 mtCOVERAGE_TEST_MARKER();
\r
2889 ulTaskSwitchedInTime = ulTotalRunTime;
\r
2891 #endif /* configGENERATE_RUN_TIME_STATS */
\r
2893 /* Check for stack overflow, if configured. */
\r
2894 taskCHECK_FOR_STACK_OVERFLOW();
\r
2896 /* Select a new task to run using either the generic C or port
\r
2897 optimised asm code. */
\r
2898 taskSELECT_HIGHEST_PRIORITY_TASK();
\r
2899 traceTASK_SWITCHED_IN();
\r
2901 #if ( configUSE_NEWLIB_REENTRANT == 1 )
\r
2903 /* Switch Newlib's _impure_ptr variable to point to the _reent
\r
2904 structure specific to this task. */
\r
2905 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
\r
2907 #endif /* configUSE_NEWLIB_REENTRANT */
\r
2910 /*-----------------------------------------------------------*/
\r
2912 void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
\r
2914 configASSERT( pxEventList );
\r
2916 /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
\r
2917 SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
\r
2919 /* Place the event list item of the TCB in the appropriate event list.
\r
2920 This is placed in the list in priority order so the highest priority task
\r
2921 is the first to be woken by the event. The queue that contains the event
\r
2922 list is locked, preventing simultaneous access from interrupts. */
\r
2923 vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
\r
2925 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
\r
2927 /*-----------------------------------------------------------*/
\r
2929 void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
\r
2931 configASSERT( pxEventList );
\r
2933 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
\r
2934 the event groups implementation. */
\r
2935 configASSERT( uxSchedulerSuspended != 0 );
\r
2937 /* Store the item value in the event list item. It is safe to access the
\r
2938 event list item here as interrupts won't access the event list item of a
\r
2939 task that is not in the Blocked state. */
\r
2940 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
\r
2942 /* Place the event list item of the TCB at the end of the appropriate event
\r
2943 list. It is safe to access the event list here because it is part of an
\r
2944 event group implementation - and interrupts don't access event groups
\r
2945 directly (instead they access them indirectly by pending function calls to
\r
2946 the task level). */
\r
2947 vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
\r
2949 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
\r
2951 /*-----------------------------------------------------------*/
\r
#if( configUSE_TIMERS == 1 )

	void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
	{
		configASSERT( pxEventList );

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements -
		it should be called with the scheduler suspended. */


		/* Place the event list item of the TCB in the appropriate event list.
		In this case it is assume that this is the only task that is going to
		be waiting on this event list, so the faster vListInsertEnd() function
		can be used in place of vListInsert. */
		vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );

		/* If the task should block indefinitely then set the block time to a
		value that will be recognised as an indefinite delay inside the
		prvAddCurrentTaskToDelayedList() function. */
		if( xWaitIndefinitely != pdFALSE )
		{
			xTicksToWait = portMAX_DELAY;
		}

		traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
		prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
	}

#endif /* configUSE_TIMERS */
\r
2984 /*-----------------------------------------------------------*/
\r
2986 BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
\r
2988 TCB_t *pxUnblockedTCB;
\r
2989 BaseType_t xReturn;
\r
2991 /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
\r
2992 called from a critical section within an ISR. */
\r
2994 /* The event list is sorted in priority order, so the first in the list can
\r
2995 be removed as it is known to be the highest priority. Remove the TCB from
\r
2996 the delayed list, and add it to the ready list.
\r
2998 If an event is for a queue that is locked then this function will never
\r
2999 get called - the lock count on the queue will get modified instead. This
\r
3000 means exclusive access to the event list is guaranteed here.
\r
3002 This function assumes that a check has already been made to ensure that
\r
3003 pxEventList is not empty. */
\r
3004 pxUnblockedTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
\r
3005 configASSERT( pxUnblockedTCB );
\r
3006 ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
\r
3008 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
3010 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
\r
3011 prvAddTaskToReadyList( pxUnblockedTCB );
\r
3015 /* The delayed and ready lists cannot be accessed, so hold this task
\r
3016 pending until the scheduler is resumed. */
\r
3017 vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
\r
3020 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
3022 /* Return true if the task removed from the event list has a higher
\r
3023 priority than the calling task. This allows the calling task to know if
\r
3024 it should force a context switch now. */
\r
3027 /* Mark that a yield is pending in case the user is not using the
\r
3028 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
\r
3029 xYieldPending = pdTRUE;
\r
3033 xReturn = pdFALSE;
\r
3036 #if( configUSE_TICKLESS_IDLE != 0 )
\r
3038 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
\r
3039 might be set to the blocked task's time out time. If the task is
\r
3040 unblocked for a reason other than a timeout xNextTaskUnblockTime is
\r
3041 normally left unchanged, because it is automatically reset to a new
\r
3042 value when the tick count equals xNextTaskUnblockTime. However if
\r
3043 tickless idling is used it might be more important to enter sleep mode
\r
3044 at the earliest possible time - so reset xNextTaskUnblockTime here to
\r
3045 ensure it is updated at the earliest possible time. */
\r
3046 prvResetNextTaskUnblockTime();
\r
3052 /*-----------------------------------------------------------*/
\r
3054 void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
\r
3056 TCB_t *pxUnblockedTCB;
\r
3058 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
\r
3059 the event flags implementation. */
\r
3060 configASSERT( uxSchedulerSuspended != pdFALSE );
\r
3062 /* Store the new item value in the event list. */
\r
3063 listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
\r
3065 /* Remove the event list form the event flag. Interrupts do not access
\r
3067 pxUnblockedTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxEventListItem );
\r
3068 configASSERT( pxUnblockedTCB );
\r
3069 ( void ) uxListRemove( pxEventListItem );
\r
3071 /* Remove the task from the delayed list and add it to the ready list. The
\r
3072 scheduler is suspended so interrupts will not be accessing the ready
\r
3074 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
\r
3075 prvAddTaskToReadyList( pxUnblockedTCB );
\r
3077 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
3079 /* The unblocked task has a priority above that of the calling task, so
\r
3080 a context switch is required. This function is called with the
\r
3081 scheduler suspended so xYieldPending is set so the context switch
\r
3082 occurs immediately that the scheduler is resumed (unsuspended). */
\r
3083 xYieldPending = pdTRUE;
\r
3086 /*-----------------------------------------------------------*/
\r
3088 void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
\r
3090 configASSERT( pxTimeOut );
\r
3091 taskENTER_CRITICAL();
\r
3093 pxTimeOut->xOverflowCount = xNumOfOverflows;
\r
3094 pxTimeOut->xTimeOnEntering = xTickCount;
\r
3096 taskEXIT_CRITICAL();
\r
3098 /*-----------------------------------------------------------*/
\r
3100 void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
\r
3102 /* For internal use only as it does not use a critical section. */
\r
3103 pxTimeOut->xOverflowCount = xNumOfOverflows;
\r
3104 pxTimeOut->xTimeOnEntering = xTickCount;
\r
3106 /*-----------------------------------------------------------*/
\r
3108 BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
\r
3110 BaseType_t xReturn;
\r
3112 configASSERT( pxTimeOut );
\r
3113 configASSERT( pxTicksToWait );
\r
3115 taskENTER_CRITICAL();
\r
3117 /* Minor optimisation. The tick count cannot change in this block. */
\r
3118 const TickType_t xConstTickCount = xTickCount;
\r
3119 const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;
\r
3121 #if( INCLUDE_xTaskAbortDelay == 1 )
\r
3122 if( pxCurrentTCB->ucDelayAborted != pdFALSE )
\r
3124 /* The delay was aborted, which is not the same as a time out,
\r
3125 but has the same result. */
\r
3126 pxCurrentTCB->ucDelayAborted = pdFALSE;
\r
3132 #if ( INCLUDE_vTaskSuspend == 1 )
\r
3133 if( *pxTicksToWait == portMAX_DELAY )
\r
3135 /* If INCLUDE_vTaskSuspend is set to 1 and the block time
\r
3136 specified is the maximum block time then the task should block
\r
3137 indefinitely, and therefore never time out. */
\r
3138 xReturn = pdFALSE;
\r
3143 if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
\r
3145 /* The tick count is greater than the time at which
\r
3146 vTaskSetTimeout() was called, but has also overflowed since
\r
3147 vTaskSetTimeOut() was called. It must have wrapped all the way
\r
3148 around and gone past again. This passed since vTaskSetTimeout()
\r
3152 else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
\r
3154 /* Not a genuine timeout. Adjust parameters for time remaining. */
\r
3155 *pxTicksToWait -= xElapsedTime;
\r
3156 vTaskInternalSetTimeOutState( pxTimeOut );
\r
3157 xReturn = pdFALSE;
\r
3161 *pxTicksToWait = 0;
\r
3165 taskEXIT_CRITICAL();
\r
3169 /*-----------------------------------------------------------*/
\r
3171 void vTaskMissedYield( void )
\r
3173 xYieldPending = pdTRUE;
\r
3175 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TRACE_FACILITY == 1 )

	UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
	{
	UBaseType_t uxReturn;
	TCB_t *pxTCB;

		if( xTask != NULL )
		{
			pxTCB = ( TCB_t * ) xTask;
			uxReturn = pxTCB->uxTaskNumber;
		}
		else
		{
			/* A NULL handle has no task number to report. */
			uxReturn = 0U;
		}

		return uxReturn;
	}

#endif /* configUSE_TRACE_FACILITY */
\r
3198 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TRACE_FACILITY == 1 )

	void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle )
	{
	TCB_t *pxTCB;

		if( xTask != NULL )
		{
			pxTCB = ( TCB_t * ) xTask;
			pxTCB->uxTaskNumber = uxHandle;
		}
	}

#endif /* configUSE_TRACE_FACILITY */
\r
3216 * -----------------------------------------------------------
\r
3218 * ----------------------------------------------------------
\r
3220 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
\r
3221 * language extensions. The equivalent prototype for this function is:
\r
3223 * void prvIdleTask( void *pvParameters );
\r
static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
	/* Stop warnings. */
	( void ) pvParameters;

	/** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
	SCHEDULER IS STARTED. **/

	/* In case a task that has a secure context deletes itself, in which case
	the idle task is responsible for deleting the task's secure context, if
	any. */
	portTASK_CALLS_SECURE_FUNCTIONS();

	for( ;; )
	{
		/* See if any tasks have deleted themselves - if so then the idle task
		is responsible for freeing the deleted task's TCB and stack. */
		prvCheckTasksWaitingTermination();

		#if ( configUSE_PREEMPTION == 0 )
		{
			/* If we are not using preemption we keep forcing a task switch to
			see if any other task has become available.  If we are using
			preemption we don't need to do this as any task becoming available
			will automatically get the processor anyway. */
			taskYIELD();
		}
		#endif /* configUSE_PREEMPTION */

		#if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
		{
			/* When using preemption tasks of equal priority will be
			timesliced.  If a task that is sharing the idle priority is ready
			to run then the idle task should yield before the end of the
			timeslice.

			A critical region is not required here as we are just reading from
			the list, and an occasional incorrect value will not matter.  If
			the ready list at the idle priority contains more than one task
			then a task other than the idle task is ready to execute. */
			if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
			{
				taskYIELD();
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */

		#if ( configUSE_IDLE_HOOK == 1 )
		{
			extern void vApplicationIdleHook( void );

			/* Call the user defined function from within the idle task.  This
			allows the application designer to add background functionality
			without the overhead of a separate task.
			NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
			CALL A FUNCTION THAT MIGHT BLOCK. */
			vApplicationIdleHook();
		}
		#endif /* configUSE_IDLE_HOOK */

		/* This conditional compilation should use inequality to 0, not equality
		to 1.  This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
		user defined low power mode implementations require
		configUSE_TICKLESS_IDLE to be set to a value other than 1. */
		#if ( configUSE_TICKLESS_IDLE != 0 )
		{
		TickType_t xExpectedIdleTime;

			/* It is not desirable to suspend then resume the scheduler on
			each iteration of the idle task.  Therefore, a preliminary
			test of the expected idle time is performed without the
			scheduler suspended.  The result here is not necessarily
			valid. */
			xExpectedIdleTime = prvGetExpectedIdleTime();

			if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
			{
				vTaskSuspendAll();
				{
					/* Now the scheduler is suspended, the expected idle
					time can be sampled again, and this time its value can
					be used. */
					configASSERT( xNextTaskUnblockTime >= xTickCount );
					xExpectedIdleTime = prvGetExpectedIdleTime();

					/* Define the following macro to set xExpectedIdleTime to 0
					if the application does not want
					portSUPPRESS_TICKS_AND_SLEEP() to be called. */
					configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );

					if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
					{
						traceLOW_POWER_IDLE_BEGIN();
						portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
						traceLOW_POWER_IDLE_END();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				( void ) xTaskResumeAll();
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_TICKLESS_IDLE */
	}
}
\r
3341 /*-----------------------------------------------------------*/
\r
#if( configUSE_TICKLESS_IDLE != 0 )

	eSleepModeStatus eTaskConfirmSleepModeStatus( void )
	{
	/* The idle task exists in addition to the application tasks. */
	const UBaseType_t uxNonApplicationTasks = 1;
	eSleepModeStatus eReturn = eStandardSleep;

		if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
		{
			/* A task was made ready while the scheduler was suspended. */
			eReturn = eAbortSleep;
		}
		else if( xYieldPending != pdFALSE )
		{
			/* A yield was pended while the scheduler was suspended. */
			eReturn = eAbortSleep;
		}
		else
		{
			/* If all the tasks are in the suspended list (which might mean they
			have an infinite block time rather than actually being suspended)
			then it is safe to turn all clocks off and just wait for external
			interrupts. */
			if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
			{
				eReturn = eNoTasksWaitingTimeout;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

		return eReturn;
	}

#endif /* configUSE_TICKLESS_IDLE */
\r
3381 /*-----------------------------------------------------------*/
\r
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )

	void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
	{
	TCB_t *pxTCB;

		/* Silently ignore out of range indexes. */
		if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
		{
			/* A NULL handle means the calling task. */
			pxTCB = prvGetTCBFromHandle( xTaskToSet );
			pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
		}
	}

#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
\r
3397 /*-----------------------------------------------------------*/
\r
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )

	void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex )
	{
	void *pvReturn = NULL;
	TCB_t *pxTCB;

		if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
		{
			/* A NULL handle means the calling task. */
			pxTCB = prvGetTCBFromHandle( xTaskToQuery );
			pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
		}
		else
		{
			/* Out of range index - report no pointer stored. */
			pvReturn = NULL;
		}

		return pvReturn;
	}

#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
\r
3420 /*-----------------------------------------------------------*/
\r
#if ( portUSING_MPU_WRAPPERS == 1 )

	void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions )
	{
	TCB_t *pxTCB;

		/* If null is passed in here then we are modifying the MPU settings of
		the calling task. */
		pxTCB = prvGetTCBFromHandle( xTaskToModify );

		vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
	}

#endif /* portUSING_MPU_WRAPPERS */
\r
3436 /*-----------------------------------------------------------*/
\r
3438 static void prvInitialiseTaskLists( void )
\r
3440 UBaseType_t uxPriority;
\r
3442 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
\r
3444 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
\r
3447 vListInitialise( &xDelayedTaskList1 );
\r
3448 vListInitialise( &xDelayedTaskList2 );
\r
3449 vListInitialise( &xPendingReadyList );
\r
3451 #if ( INCLUDE_vTaskDelete == 1 )
\r
3453 vListInitialise( &xTasksWaitingTermination );
\r
3455 #endif /* INCLUDE_vTaskDelete */
\r
3457 #if ( INCLUDE_vTaskSuspend == 1 )
\r
3459 vListInitialise( &xSuspendedTaskList );
\r
3461 #endif /* INCLUDE_vTaskSuspend */
\r
3463 /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
\r
3465 pxDelayedTaskList = &xDelayedTaskList1;
\r
3466 pxOverflowDelayedTaskList = &xDelayedTaskList2;
\r
3468 /*-----------------------------------------------------------*/
\r
static void prvCheckTasksWaitingTermination( void )
{

	/** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/

	#if ( INCLUDE_vTaskDelete == 1 )
	{
		TCB_t *pxTCB;

		/* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
		being called too often in the idle task. */
		while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
		{
			taskENTER_CRITICAL();
			{
				pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) );
				( void ) uxListRemove( &( pxTCB->xStateListItem ) );
				--uxCurrentNumberOfTasks;
				--uxDeletedTasksWaitingCleanUp;
			}
			taskEXIT_CRITICAL();

			prvDeleteTCB( pxTCB );
		}
	}
	#endif /* INCLUDE_vTaskDelete */
}
\r
3497 /*-----------------------------------------------------------*/
\r
#if( configUSE_TRACE_FACILITY == 1 )

	void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState )
	{
	TCB_t *pxTCB;

		/* xTask is NULL then get the state of the calling task. */
		pxTCB = prvGetTCBFromHandle( xTask );

		pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
		pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName [ 0 ] );
		pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
		pxTaskStatus->pxStackBase = pxTCB->pxStack;
		pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;

		#if ( configUSE_MUTEXES == 1 )
		{
			pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
		}
		#else
		{
			pxTaskStatus->uxBasePriority = 0;
		}
		#endif

		#if ( configGENERATE_RUN_TIME_STATS == 1 )
		{
			pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
		}
		#else
		{
			pxTaskStatus->ulRunTimeCounter = 0;
		}
		#endif

		/* Obtaining the task state is a little fiddly, so is only done if the
		value of eState passed into this function is eInvalid - otherwise the
		state is just set to whatever is passed in. */
		if( eState != eInvalid )
		{
			if( pxTCB == pxCurrentTCB )
			{
				pxTaskStatus->eCurrentState = eRunning;
			}
			else
			{
				pxTaskStatus->eCurrentState = eState;

				#if ( INCLUDE_vTaskSuspend == 1 )
				{
					/* If the task is in the suspended list then there is a
					chance it is actually just blocked indefinitely - so really
					it should be reported as being in the Blocked state. */
					if( eState == eSuspended )
					{
						vTaskSuspendAll();
						{
							if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
							{
								pxTaskStatus->eCurrentState = eBlocked;
							}
						}
						( void ) xTaskResumeAll();
					}
				}
				#endif /* INCLUDE_vTaskSuspend */
			}
		}
		else
		{
			pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
		}

		/* Obtaining the stack space takes some time, so the xGetFreeStackSpace
		parameter is provided to allow it to be skipped. */
		if( xGetFreeStackSpace != pdFALSE )
		{
			#if ( portSTACK_GROWTH > 0 )
			{
				pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
			}
			#else
			{
				pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
			}
			#endif
		}
		else
		{
			pxTaskStatus->usStackHighWaterMark = 0;
		}
	}

#endif /* configUSE_TRACE_FACILITY */
\r
3593 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TRACE_FACILITY == 1 )

	static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState )
	{
	configLIST_VOLATILE TCB_t *pxNextTCB, *pxFirstTCB;
	UBaseType_t uxTask = 0;

		if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
		{
			listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );

			/* Populate an TaskStatus_t structure within the
			pxTaskStatusArray array for each task that is referenced from
			pxList.  See the definition of TaskStatus_t in task.h for the
			meaning of each TaskStatus_t structure member. */
			do
			{
				listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );
				vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
				uxTask++;
			} while( pxNextTCB != pxFirstTCB );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return uxTask;
	}

#endif /* configUSE_TRACE_FACILITY */
\r
3626 /*-----------------------------------------------------------*/
\r
#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )

	static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
	{
	uint32_t ulCount = 0U;

		/* Count the number of stack bytes that still hold the fill byte
		written when the stack was created - i.e. have never been used. */
		while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
		{
			pucStackByte -= portSTACK_GROWTH;
			ulCount++;
		}

		ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */

		return ( configSTACK_DEPTH_TYPE ) ulCount;
	}

#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) */
\r
3646 /*-----------------------------------------------------------*/
\r
#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )

	UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
	{
	TCB_t *pxTCB;
	uint8_t *pucEndOfStack;
	UBaseType_t uxReturn;

		/* A NULL handle means the calling task. */
		pxTCB = prvGetTCBFromHandle( xTask );

		/* Scan from the end of the stack furthest from the current stack
		pointer, which is the start or end address depending on the direction
		in which the port's stack grows. */
		#if portSTACK_GROWTH < 0
		{
			pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
		}
		#else
		{
			pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
		}
		#endif

		uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );

		return uxReturn;
	}

#endif /* INCLUDE_uxTaskGetStackHighWaterMark */
\r
3674 /*-----------------------------------------------------------*/
\r
#if ( INCLUDE_vTaskDelete == 1 )

	static void prvDeleteTCB( TCB_t *pxTCB )
	{
		/* This call is required specifically for the TriCore port.  It must be
		above the vPortFree() calls.  The call is also used by ports/demos that
		want to allocate and clean RAM statically. */
		portCLEAN_UP_TCB( pxTCB );

		/* Free up the memory allocated by the scheduler for the task.  It is up
		to the task to free any memory allocated at the application level. */
		#if ( configUSE_NEWLIB_REENTRANT == 1 )
		{
			_reclaim_reent( &( pxTCB->xNewLib_reent ) );
		}
		#endif /* configUSE_NEWLIB_REENTRANT */

		#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
		{
			/* The task can only have been allocated dynamically - free both
			the stack and TCB. */
			vPortFree( pxTCB->pxStack );
			vPortFree( pxTCB );
		}
		#elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 Macro has been consolidated for readability reasons. */
		{
			/* The task could have been allocated statically or dynamically, so
			check what was statically allocated before trying to free the
			memory. */
			if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
			{
				/* Both the stack and TCB were allocated dynamically, so both
				must be freed. */
				vPortFree( pxTCB->pxStack );
				vPortFree( pxTCB );
			}
			else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
			{
				/* Only the stack was statically allocated, so the TCB is the
				only memory that must be freed. */
				vPortFree( pxTCB );
			}
			else
			{
				/* Neither the stack nor the TCB were allocated dynamically, so
				nothing needs to be freed. */
				configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
	}

#endif /* INCLUDE_vTaskDelete */
\r
3730 /*-----------------------------------------------------------*/
\r
3732 static void prvResetNextTaskUnblockTime( void )
\r
3736 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
\r
3738 /* The new current delayed list is empty. Set xNextTaskUnblockTime to
\r
3739 the maximum possible value so it is extremely unlikely that the
\r
3740 if( xTickCount >= xNextTaskUnblockTime ) test will pass until
\r
3741 there is an item in the delayed list. */
\r
3742 xNextTaskUnblockTime = portMAX_DELAY;
\r
3746 /* The new current delayed list is not empty, get the value of
\r
3747 the item at the head of the delayed list. This is the time at
\r
3748 which the task at the head of the delayed list should be removed
\r
3749 from the Blocked state. */
\r
3750 ( pxTCB ) = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
\r
3751 xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xStateListItem ) );
\r
3754 /*-----------------------------------------------------------*/
\r
#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )

	TaskHandle_t xTaskGetCurrentTaskHandle( void )
	{
	TaskHandle_t xReturn;

		/* A critical section is not required as this is not called from
		an interrupt and the current TCB will always be the same for any
		individual execution thread. */
		xReturn = pxCurrentTCB;

		return xReturn;
	}

#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
\r
3771 /*-----------------------------------------------------------*/
\r
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )

	BaseType_t xTaskGetSchedulerState( void )
	{
	BaseType_t xReturn;

		if( xSchedulerRunning == pdFALSE )
		{
			xReturn = taskSCHEDULER_NOT_STARTED;
		}
		else
		{
			if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
			{
				xReturn = taskSCHEDULER_RUNNING;
			}
			else
			{
				xReturn = taskSCHEDULER_SUSPENDED;
			}
		}

		return xReturn;
	}

#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
\r
3799 /*-----------------------------------------------------------*/
\r
#if ( configUSE_MUTEXES == 1 )

	BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
	{
	TCB_t * const pxMutexHolderTCB = ( TCB_t * ) pxMutexHolder;
	BaseType_t xReturn = pdFALSE;

		/* If the mutex was given back by an interrupt while the queue was
		locked then the mutex holder might now be NULL.  _RB_ Is this still
		needed as interrupts can no longer use mutexes? */
		if( pxMutexHolder != NULL )
		{
			/* If the holder of the mutex has a priority below the priority of
			the task attempting to obtain the mutex then it will temporarily
			inherit the priority of the task attempting to obtain the mutex. */
			if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority )
			{
				/* Adjust the mutex holder state to account for its new
				priority.  Only reset the event list item value if the value is
				not being used for anything else. */
				if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
				{
					listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				/* If the task being modified is in the ready state it will need
				to be moved into a new list. */
				if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
				{
					if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
					{
						taskRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority );
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					/* Inherit the priority before being moved into the new list. */
					pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
					prvAddTaskToReadyList( pxMutexHolderTCB );
				}
				else
				{
					/* Just inherit the priority. */
					pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
				}

				traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority );

				/* Inheritance occurred. */
				xReturn = pdTRUE;
			}
			else
			{
				if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority )
				{
					/* The base priority of the mutex holder is lower than the
					priority of the task attempting to take the mutex, but the
					current priority of the mutex holder is not lower than the
					priority of the task attempting to take the mutex.
					Therefore the mutex holder must have already inherited a
					priority, but inheritance would have occurred if that had
					not been the case. */
					xReturn = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_MUTEXES */
\r
3886 /*-----------------------------------------------------------*/
\r
#if ( configUSE_MUTEXES == 1 )

	BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
	{
	TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
	BaseType_t xReturn = pdFALSE;

		if( pxMutexHolder != NULL )
		{
			/* A task can only have an inherited priority if it holds the mutex.
			If the mutex is held by a task then it cannot be given from an
			interrupt, and if a mutex is given by the holding task then it must
			be the running state task. */
			configASSERT( pxTCB == pxCurrentTCB );
			configASSERT( pxTCB->uxMutexesHeld );
			( pxTCB->uxMutexesHeld )--;

			/* Has the holder of the mutex inherited the priority of another
			task? */
			if( pxTCB->uxPriority != pxTCB->uxBasePriority )
			{
				/* Only disinherit if no other mutexes are held. */
				if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
				{
					/* A task can only have an inherited priority if it holds
					the mutex.  If the mutex is held by a task then it cannot be
					given from an interrupt, and if a mutex is given by the
					holding	task then it must be the running state task.  Remove
					the	holding task from the ready	list. */
					if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
					{
						taskRESET_READY_PRIORITY( pxTCB->uxPriority );
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					/* Disinherit the priority before adding the task into the
					new	ready list. */
					traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
					pxTCB->uxPriority = pxTCB->uxBasePriority;

					/* Reset the event list item value.  It cannot be in use for
					any other purpose if this task is running, and it must be
					running to give back the mutex. */
					listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
					prvAddTaskToReadyList( pxTCB );

					/* Return true to indicate that a context switch is required.
					This is only actually required in the corner case whereby
					multiple mutexes were held and the mutexes were given back
					in an order different to that in which they were taken.
					If a context switch did not occur when the first mutex was
					returned, even if a task was waiting on it, then a context
					switch should occur when the last mutex is returned whether
					a task is waiting on it or not. */
					xReturn = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_MUTEXES */
\r
3966 /*-----------------------------------------------------------*/
\r
3968 #if ( configUSE_MUTEXES == 1 )
\r
3970 void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask )
\r
3972 TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
\r
3973 UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
\r
3974 const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;
\r
3976 if( pxMutexHolder != NULL )
\r
3978 /* If pxMutexHolder is not NULL then the holder must hold at least
\r
3980 configASSERT( pxTCB->uxMutexesHeld );
\r
3982 /* Determine the priority to which the priority of the task that
\r
3983 holds the mutex should be set. This will be the greater of the
\r
3984 holding task's base priority and the priority of the highest
\r
3985 priority task that is waiting to obtain the mutex. */
\r
3986 if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
\r
3988 uxPriorityToUse = uxHighestPriorityWaitingTask;
\r
3992 uxPriorityToUse = pxTCB->uxBasePriority;
\r
3995 /* Does the priority need to change? */
\r
3996 if( pxTCB->uxPriority != uxPriorityToUse )
\r
3998 /* Only disinherit if no other mutexes are held. This is a
\r
3999 simplification in the priority inheritance implementation. If
\r
4000 the task that holds the mutex is also holding other mutexes then
\r
4001 the other mutexes may have caused the priority inheritance. */
\r
4002 if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
\r
4004 /* If a task has timed out because it already holds the
\r
4005 mutex it was trying to obtain then it cannot of inherited
\r
4006 its own priority. */
\r
4007 configASSERT( pxTCB != pxCurrentTCB );
\r
4009 /* Disinherit the priority, remembering the previous
\r
4010 priority to facilitate determining the subject task's
\r
4012 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
\r
4013 uxPriorityUsedOnEntry = pxTCB->uxPriority;
\r
4014 pxTCB->uxPriority = uxPriorityToUse;
\r
4016 /* Only reset the event list item value if the value is not
\r
4017 being used for anything else. */
\r
4018 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
\r
4020 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
4024 mtCOVERAGE_TEST_MARKER();
\r
4027 /* If the running task is not the task that holds the mutex
\r
4028 then the task that holds the mutex could be in either the
\r
4029 Ready, Blocked or Suspended states. Only remove the task
\r
4030 from its current state list if it is in the Ready state as
\r
4031 the task's priority is going to change and there is one
\r
4032 Ready list per priority. */
\r
4033 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
\r
4035 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
4037 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
\r
4041 mtCOVERAGE_TEST_MARKER();
\r
4044 prvAddTaskToReadyList( pxTCB );
\r
4048 mtCOVERAGE_TEST_MARKER();
\r
4053 mtCOVERAGE_TEST_MARKER();
\r
4058 mtCOVERAGE_TEST_MARKER();
\r
4063 mtCOVERAGE_TEST_MARKER();
\r
4067 #endif /* configUSE_MUTEXES */
\r
4068 /*-----------------------------------------------------------*/
\r
4070 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
\r
4072 void vTaskEnterCritical( void )
\r
4074 portDISABLE_INTERRUPTS();
\r
4076 if( xSchedulerRunning != pdFALSE )
\r
4078 ( pxCurrentTCB->uxCriticalNesting )++;
\r
4080 /* This is not the interrupt safe version of the enter critical
\r
4081 function so assert() if it is being called from an interrupt
\r
4082 context. Only API functions that end in "FromISR" can be used in an
\r
4083 interrupt. Only assert if the critical nesting count is 1 to
\r
4084 protect against recursive calls if the assert function also uses a
\r
4085 critical section. */
\r
4086 if( pxCurrentTCB->uxCriticalNesting == 1 )
\r
4088 portASSERT_IF_IN_ISR();
\r
4093 mtCOVERAGE_TEST_MARKER();
\r
4097 #endif /* portCRITICAL_NESTING_IN_TCB */
\r
4098 /*-----------------------------------------------------------*/
\r
4100 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
\r
4102 void vTaskExitCritical( void )
\r
4104 if( xSchedulerRunning != pdFALSE )
\r
4106 if( pxCurrentTCB->uxCriticalNesting > 0U )
\r
4108 ( pxCurrentTCB->uxCriticalNesting )--;
\r
4110 if( pxCurrentTCB->uxCriticalNesting == 0U )
\r
4112 portENABLE_INTERRUPTS();
\r
4116 mtCOVERAGE_TEST_MARKER();
\r
4121 mtCOVERAGE_TEST_MARKER();
\r
4126 mtCOVERAGE_TEST_MARKER();
\r
4130 #endif /* portCRITICAL_NESTING_IN_TCB */
\r
4131 /*-----------------------------------------------------------*/
\r
4133 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
\r
4135 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName )
\r
4139 /* Start by copying the entire string. */
\r
4140 strcpy( pcBuffer, pcTaskName );
\r
4142 /* Pad the end of the string with spaces to ensure columns line up when
\r
4144 for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ )
\r
4146 pcBuffer[ x ] = ' ';
\r
4150 pcBuffer[ x ] = 0x00;
\r
4152 /* Return the new end of string. */
\r
4153 return &( pcBuffer[ x ] );
\r
4156 #endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
\r
4157 /*-----------------------------------------------------------*/
\r
4159 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
\r
4161 void vTaskList( char * pcWriteBuffer )
\r
4163 TaskStatus_t *pxTaskStatusArray;
\r
4164 volatile UBaseType_t uxArraySize, x;
\r
4170 * This function is provided for convenience only, and is used by many
\r
4171 * of the demo applications. Do not consider it to be part of the
\r
4174 * vTaskList() calls uxTaskGetSystemState(), then formats part of the
\r
4175 * uxTaskGetSystemState() output into a human readable table that
\r
4176 * displays task names, states and stack usage.
\r
4178 * vTaskList() has a dependency on the sprintf() C library function that
\r
4179 * might bloat the code size, use a lot of stack, and provide different
\r
4180 * results on different platforms. An alternative, tiny, third party,
\r
4181 * and limited functionality implementation of sprintf() is provided in
\r
4182 * many of the FreeRTOS/Demo sub-directories in a file called
\r
4183 * printf-stdarg.c (note printf-stdarg.c does not provide a full
\r
4184 * snprintf() implementation!).
\r
4186 * It is recommended that production systems call uxTaskGetSystemState()
\r
4187 * directly to get access to raw stats data, rather than indirectly
\r
4188 * through a call to vTaskList().
\r
4192 /* Make sure the write buffer does not contain a string. */
\r
4193 *pcWriteBuffer = 0x00;
\r
4195 /* Take a snapshot of the number of tasks in case it changes while this
\r
4196 function is executing. */
\r
4197 uxArraySize = uxCurrentNumberOfTasks;
\r
4199 /* Allocate an array index for each task. NOTE! if
\r
4200 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
\r
4201 equate to NULL. */
\r
4202 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );
\r
4204 if( pxTaskStatusArray != NULL )
\r
4206 /* Generate the (binary) data. */
\r
4207 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );
\r
4209 /* Create a human readable table from the binary data. */
\r
4210 for( x = 0; x < uxArraySize; x++ )
\r
4212 switch( pxTaskStatusArray[ x ].eCurrentState )
\r
4214 case eRunning: cStatus = tskRUNNING_CHAR;
\r
4217 case eReady: cStatus = tskREADY_CHAR;
\r
4220 case eBlocked: cStatus = tskBLOCKED_CHAR;
\r
4223 case eSuspended: cStatus = tskSUSPENDED_CHAR;
\r
4226 case eDeleted: cStatus = tskDELETED_CHAR;
\r
4229 default: /* Should not get here, but it is included
\r
4230 to prevent static checking errors. */
\r
4235 /* Write the task name to the string, padding with spaces so it
\r
4236 can be printed in tabular form more easily. */
\r
4237 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
\r
4239 /* Write the rest of the string. */
\r
4240 sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber );
\r
4241 pcWriteBuffer += strlen( pcWriteBuffer );
\r
4244 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
\r
4245 is 0 then vPortFree() will be #defined to nothing. */
\r
4246 vPortFree( pxTaskStatusArray );
\r
4250 mtCOVERAGE_TEST_MARKER();
\r
4254 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
\r
4255 /*----------------------------------------------------------*/
\r
4257 #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
\r
4259 void vTaskGetRunTimeStats( char *pcWriteBuffer )
\r
4261 TaskStatus_t *pxTaskStatusArray;
\r
4262 volatile UBaseType_t uxArraySize, x;
\r
4263 uint32_t ulTotalTime, ulStatsAsPercentage;
\r
4265 #if( configUSE_TRACE_FACILITY != 1 )
\r
4267 #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
\r
4274 * This function is provided for convenience only, and is used by many
\r
4275 * of the demo applications. Do not consider it to be part of the
\r
4278 * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
\r
4279 * of the uxTaskGetSystemState() output into a human readable table that
\r
4280 * displays the amount of time each task has spent in the Running state
\r
4281 * in both absolute and percentage terms.
\r
4283 * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
\r
4284 * function that might bloat the code size, use a lot of stack, and
\r
4285 * provide different results on different platforms. An alternative,
\r
4286 * tiny, third party, and limited functionality implementation of
\r
4287 * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
\r
4288 * a file called printf-stdarg.c (note printf-stdarg.c does not provide
\r
4289 * a full snprintf() implementation!).
\r
4291 * It is recommended that production systems call uxTaskGetSystemState()
\r
4292 * directly to get access to raw stats data, rather than indirectly
\r
4293 * through a call to vTaskGetRunTimeStats().
\r
4296 /* Make sure the write buffer does not contain a string. */
\r
4297 *pcWriteBuffer = 0x00;
\r
4299 /* Take a snapshot of the number of tasks in case it changes while this
\r
4300 function is executing. */
\r
4301 uxArraySize = uxCurrentNumberOfTasks;
\r
4303 /* Allocate an array index for each task. NOTE! If
\r
4304 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
\r
4305 equate to NULL. */
\r
4306 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );
\r
4308 if( pxTaskStatusArray != NULL )
\r
4310 /* Generate the (binary) data. */
\r
4311 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
\r
4313 /* For percentage calculations. */
\r
4314 ulTotalTime /= 100UL;
\r
4316 /* Avoid divide by zero errors. */
\r
4317 if( ulTotalTime > 0 )
\r
4319 /* Create a human readable table from the binary data. */
\r
4320 for( x = 0; x < uxArraySize; x++ )
\r
4322 /* What percentage of the total run time has the task used?
\r
4323 This will always be rounded down to the nearest integer.
\r
4324 ulTotalRunTimeDiv100 has already been divided by 100. */
\r
4325 ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;
\r
4327 /* Write the task name to the string, padding with
\r
4328 spaces so it can be printed in tabular form more
\r
4330 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
\r
4332 if( ulStatsAsPercentage > 0UL )
\r
4334 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
\r
4336 sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
\r
4340 /* sizeof( int ) == sizeof( long ) so a smaller
\r
4341 printf() library can be used. */
\r
4342 sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage );
\r
4348 /* If the percentage is zero here then the task has
\r
4349 consumed less than 1% of the total run time. */
\r
4350 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
\r
4352 sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
\r
4356 /* sizeof( int ) == sizeof( long ) so a smaller
\r
4357 printf() library can be used. */
\r
4358 sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter );
\r
4363 pcWriteBuffer += strlen( pcWriteBuffer );
\r
4368 mtCOVERAGE_TEST_MARKER();
\r
4371 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
\r
4372 is 0 then vPortFree() will be #defined to nothing. */
\r
4373 vPortFree( pxTaskStatusArray );
\r
4377 mtCOVERAGE_TEST_MARKER();
\r
4381 #endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
\r
4382 /*-----------------------------------------------------------*/
\r
4384 TickType_t uxTaskResetEventItemValue( void )
\r
4386 TickType_t uxReturn;
\r
4388 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
\r
4390 /* Reset the event list item to its normal value - so it can be used with
\r
4391 queues and semaphores. */
\r
4392 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
4396 /*-----------------------------------------------------------*/
\r
4398 #if ( configUSE_MUTEXES == 1 )
\r
4400 void *pvTaskIncrementMutexHeldCount( void )
\r
4402 /* If xSemaphoreCreateMutex() is called before any tasks have been created
\r
4403 then pxCurrentTCB will be NULL. */
\r
4404 if( pxCurrentTCB != NULL )
\r
4406 ( pxCurrentTCB->uxMutexesHeld )++;
\r
4409 return pxCurrentTCB;
\r
4412 #endif /* configUSE_MUTEXES */
\r
4413 /*-----------------------------------------------------------*/
\r
4415 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
4417 uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait )
\r
4419 uint32_t ulReturn;
\r
4421 taskENTER_CRITICAL();
\r
4423 /* Only block if the notification count is not already non-zero. */
\r
4424 if( pxCurrentTCB->ulNotifiedValue == 0UL )
\r
4426 /* Mark this task as waiting for a notification. */
\r
4427 pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;
\r
4429 if( xTicksToWait > ( TickType_t ) 0 )
\r
4431 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
\r
4432 traceTASK_NOTIFY_TAKE_BLOCK();
\r
4434 /* All ports are written to allow a yield in a critical
\r
4435 section (some will yield immediately, others wait until the
\r
4436 critical section exits) - but it is not something that
\r
4437 application code should ever do. */
\r
4438 portYIELD_WITHIN_API();
\r
4442 mtCOVERAGE_TEST_MARKER();
\r
4447 mtCOVERAGE_TEST_MARKER();
\r
4450 taskEXIT_CRITICAL();
\r
4452 taskENTER_CRITICAL();
\r
4454 traceTASK_NOTIFY_TAKE();
\r
4455 ulReturn = pxCurrentTCB->ulNotifiedValue;
\r
4457 if( ulReturn != 0UL )
\r
4459 if( xClearCountOnExit != pdFALSE )
\r
4461 pxCurrentTCB->ulNotifiedValue = 0UL;
\r
4465 pxCurrentTCB->ulNotifiedValue = ulReturn - ( uint32_t ) 1;
\r
4470 mtCOVERAGE_TEST_MARKER();
\r
4473 pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
\r
4475 taskEXIT_CRITICAL();
\r
4480 #endif /* configUSE_TASK_NOTIFICATIONS */
\r
4481 /*-----------------------------------------------------------*/
\r
4483 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
4485 BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait )
\r
4487 BaseType_t xReturn;
\r
4489 taskENTER_CRITICAL();
\r
4491 /* Only block if a notification is not already pending. */
\r
4492 if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED )
\r
4494 /* Clear bits in the task's notification value as bits may get
\r
4495 set by the notifying task or interrupt. This can be used to
\r
4496 clear the value to zero. */
\r
4497 pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnEntry;
\r
4499 /* Mark this task as waiting for a notification. */
\r
4500 pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;
\r
4502 if( xTicksToWait > ( TickType_t ) 0 )
\r
4504 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
\r
4505 traceTASK_NOTIFY_WAIT_BLOCK();
\r
4507 /* All ports are written to allow a yield in a critical
\r
4508 section (some will yield immediately, others wait until the
\r
4509 critical section exits) - but it is not something that
\r
4510 application code should ever do. */
\r
4511 portYIELD_WITHIN_API();
\r
4515 mtCOVERAGE_TEST_MARKER();
\r
4520 mtCOVERAGE_TEST_MARKER();
\r
4523 taskEXIT_CRITICAL();
\r
4525 taskENTER_CRITICAL();
\r
4527 traceTASK_NOTIFY_WAIT();
\r
4529 if( pulNotificationValue != NULL )
\r
4531 /* Output the current notification value, which may or may not
\r
4533 *pulNotificationValue = pxCurrentTCB->ulNotifiedValue;
\r
4536 /* If ucNotifyValue is set then either the task never entered the
\r
4537 blocked state (because a notification was already pending) or the
\r
4538 task unblocked because of a notification. Otherwise the task
\r
4539 unblocked because of a timeout. */
\r
4540 if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED )
\r
4542 /* A notification was not received. */
\r
4543 xReturn = pdFALSE;
\r
4547 /* A notification was already pending or a notification was
\r
4548 received while the task was waiting. */
\r
4549 pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnExit;
\r
4553 pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
\r
4555 taskEXIT_CRITICAL();
\r
4560 #endif /* configUSE_TASK_NOTIFICATIONS */
\r
4561 /*-----------------------------------------------------------*/
\r
4563 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
4565 BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue )
\r
4568 BaseType_t xReturn = pdPASS;
\r
4569 uint8_t ucOriginalNotifyState;
\r
4571 configASSERT( xTaskToNotify );
\r
4572 pxTCB = ( TCB_t * ) xTaskToNotify;
\r
4574 taskENTER_CRITICAL();
\r
4576 if( pulPreviousNotificationValue != NULL )
\r
4578 *pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
\r
4581 ucOriginalNotifyState = pxTCB->ucNotifyState;
\r
4583 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
\r
4588 pxTCB->ulNotifiedValue |= ulValue;
\r
4592 ( pxTCB->ulNotifiedValue )++;
\r
4595 case eSetValueWithOverwrite :
\r
4596 pxTCB->ulNotifiedValue = ulValue;
\r
4599 case eSetValueWithoutOverwrite :
\r
4600 if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
\r
4602 pxTCB->ulNotifiedValue = ulValue;
\r
4606 /* The value could not be written to the task. */
\r
4612 /* The task is being notified without its notify value being
\r
4617 traceTASK_NOTIFY();
\r
4619 /* If the task is in the blocked state specifically to wait for a
\r
4620 notification then unblock it now. */
\r
4621 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
\r
4623 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
4624 prvAddTaskToReadyList( pxTCB );
\r
4626 /* The task should not have been on an event list. */
\r
4627 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
\r
4629 #if( configUSE_TICKLESS_IDLE != 0 )
\r
4631 /* If a task is blocked waiting for a notification then
\r
4632 xNextTaskUnblockTime might be set to the blocked task's time
\r
4633 out time. If the task is unblocked for a reason other than
\r
4634 a timeout xNextTaskUnblockTime is normally left unchanged,
\r
4635 because it will automatically get reset to a new value when
\r
4636 the tick count equals xNextTaskUnblockTime. However if
\r
4637 tickless idling is used it might be more important to enter
\r
4638 sleep mode at the earliest possible time - so reset
\r
4639 xNextTaskUnblockTime here to ensure it is updated at the
\r
4640 earliest possible time. */
\r
4641 prvResetNextTaskUnblockTime();
\r
4645 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
4647 /* The notified task has a priority above the currently
\r
4648 executing task so a yield is required. */
\r
4649 taskYIELD_IF_USING_PREEMPTION();
\r
4653 mtCOVERAGE_TEST_MARKER();
\r
4658 mtCOVERAGE_TEST_MARKER();
\r
4661 taskEXIT_CRITICAL();
\r
4666 #endif /* configUSE_TASK_NOTIFICATIONS */
\r
4667 /*-----------------------------------------------------------*/
\r
4669 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
4671 BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken )
\r
4674 uint8_t ucOriginalNotifyState;
\r
4675 BaseType_t xReturn = pdPASS;
\r
4676 UBaseType_t uxSavedInterruptStatus;
\r
4678 configASSERT( xTaskToNotify );
\r
4680 /* RTOS ports that support interrupt nesting have the concept of a
\r
4681 maximum system call (or maximum API call) interrupt priority.
\r
4682 Interrupts that are above the maximum system call priority are keep
\r
4683 permanently enabled, even when the RTOS kernel is in a critical section,
\r
4684 but cannot make any calls to FreeRTOS API functions. If configASSERT()
\r
4685 is defined in FreeRTOSConfig.h then
\r
4686 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
4687 failure if a FreeRTOS API function is called from an interrupt that has
\r
4688 been assigned a priority above the configured maximum system call
\r
4689 priority. Only FreeRTOS functions that end in FromISR can be called
\r
4690 from interrupts that have been assigned a priority at or (logically)
\r
4691 below the maximum system call interrupt priority. FreeRTOS maintains a
\r
4692 separate interrupt safe API to ensure interrupt entry is as fast and as
\r
4693 simple as possible. More information (albeit Cortex-M specific) is
\r
4694 provided on the following link:
\r
4695 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
4696 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
4698 pxTCB = ( TCB_t * ) xTaskToNotify;
\r
4700 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
4702 if( pulPreviousNotificationValue != NULL )
\r
4704 *pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
\r
4707 ucOriginalNotifyState = pxTCB->ucNotifyState;
\r
4708 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
\r
4713 pxTCB->ulNotifiedValue |= ulValue;
\r
4717 ( pxTCB->ulNotifiedValue )++;
\r
4720 case eSetValueWithOverwrite :
\r
4721 pxTCB->ulNotifiedValue = ulValue;
\r
4724 case eSetValueWithoutOverwrite :
\r
4725 if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
\r
4727 pxTCB->ulNotifiedValue = ulValue;
\r
4731 /* The value could not be written to the task. */
\r
4737 /* The task is being notified without its notify value being
\r
4742 traceTASK_NOTIFY_FROM_ISR();
\r
4744 /* If the task is in the blocked state specifically to wait for a
\r
4745 notification then unblock it now. */
\r
4746 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
\r
4748 /* The task should not have been on an event list. */
\r
4749 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
\r
4751 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
4753 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
4754 prvAddTaskToReadyList( pxTCB );
\r
4758 /* The delayed and ready lists cannot be accessed, so hold
\r
4759 this task pending until the scheduler is resumed. */
\r
4760 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
\r
4763 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
4765 /* The notified task has a priority above the currently
\r
4766 executing task so a yield is required. */
\r
4767 if( pxHigherPriorityTaskWoken != NULL )
\r
4769 *pxHigherPriorityTaskWoken = pdTRUE;
\r
4773 /* Mark that a yield is pending in case the user is not
\r
4774 using the "xHigherPriorityTaskWoken" parameter to an ISR
\r
4775 safe FreeRTOS function. */
\r
4776 xYieldPending = pdTRUE;
\r
4781 mtCOVERAGE_TEST_MARKER();
\r
4785 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
4790 #endif /* configUSE_TASK_NOTIFICATIONS */
\r
4791 /*-----------------------------------------------------------*/
\r
4793 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
4795 void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken )
\r
4798 uint8_t ucOriginalNotifyState;
\r
4799 UBaseType_t uxSavedInterruptStatus;
\r
4801 configASSERT( xTaskToNotify );
\r
4803 /* RTOS ports that support interrupt nesting have the concept of a
\r
4804 maximum system call (or maximum API call) interrupt priority.
\r
4805 Interrupts that are above the maximum system call priority are keep
\r
4806 permanently enabled, even when the RTOS kernel is in a critical section,
\r
4807 but cannot make any calls to FreeRTOS API functions. If configASSERT()
\r
4808 is defined in FreeRTOSConfig.h then
\r
4809 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
4810 failure if a FreeRTOS API function is called from an interrupt that has
\r
4811 been assigned a priority above the configured maximum system call
\r
4812 priority. Only FreeRTOS functions that end in FromISR can be called
\r
4813 from interrupts that have been assigned a priority at or (logically)
\r
4814 below the maximum system call interrupt priority. FreeRTOS maintains a
\r
4815 separate interrupt safe API to ensure interrupt entry is as fast and as
\r
4816 simple as possible. More information (albeit Cortex-M specific) is
\r
4817 provided on the following link:
\r
4818 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
4819 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
4821 pxTCB = ( TCB_t * ) xTaskToNotify;
\r
4823 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
4825 ucOriginalNotifyState = pxTCB->ucNotifyState;
\r
4826 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
\r
4828 /* 'Giving' is equivalent to incrementing a count in a counting
\r
4830 ( pxTCB->ulNotifiedValue )++;
\r
4832 traceTASK_NOTIFY_GIVE_FROM_ISR();
\r
4834 /* If the task is in the blocked state specifically to wait for a
\r
4835 notification then unblock it now. */
\r
4836 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
\r
4838 /* The task should not have been on an event list. */
\r
4839 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
\r
4841 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
4843 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
4844 prvAddTaskToReadyList( pxTCB );
\r
4848 /* The delayed and ready lists cannot be accessed, so hold
\r
4849 this task pending until the scheduler is resumed. */
\r
4850 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
\r
4853 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
4855 /* The notified task has a priority above the currently
\r
4856 executing task so a yield is required. */
\r
4857 if( pxHigherPriorityTaskWoken != NULL )
\r
4859 *pxHigherPriorityTaskWoken = pdTRUE;
\r
4863 /* Mark that a yield is pending in case the user is not
\r
4864 using the "xHigherPriorityTaskWoken" parameter in an ISR
\r
4865 safe FreeRTOS function. */
\r
4866 xYieldPending = pdTRUE;
\r
4871 mtCOVERAGE_TEST_MARKER();
\r
4875 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
4878 #endif /* configUSE_TASK_NOTIFICATIONS */
\r
4880 /*-----------------------------------------------------------*/
\r
4882 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
4884 BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask )
\r
4887 BaseType_t xReturn;
\r
4889 /* If null is passed in here then it is the calling task that is having
\r
4890 its notification state cleared. */
\r
4891 pxTCB = prvGetTCBFromHandle( xTask );
\r
4893 taskENTER_CRITICAL();
\r
4895 if( pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED )
\r
4897 pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
\r
4905 taskEXIT_CRITICAL();
\r
4910 #endif /* configUSE_TASK_NOTIFICATIONS */
\r
4911 /*-----------------------------------------------------------*/
\r
4914 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely )
\r
4916 TickType_t xTimeToWake;
\r
4917 const TickType_t xConstTickCount = xTickCount;
\r
4919 #if( INCLUDE_xTaskAbortDelay == 1 )
\r
4921 /* About to enter a delayed list, so ensure the ucDelayAborted flag is
\r
4922 reset to pdFALSE so it can be detected as having been set to pdTRUE
\r
4923 when the task leaves the Blocked state. */
\r
4924 pxCurrentTCB->ucDelayAborted = pdFALSE;
\r
4928 /* Remove the task from the ready list before adding it to the blocked list
\r
4929 as the same list item is used for both lists. */
\r
4930 if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
4932 /* The current task must be in a ready list, so there is no need to
\r
4933 check, and the port reset macro can be called directly. */
\r
4934 portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority );
\r
4938 mtCOVERAGE_TEST_MARKER();
\r
4941 #if ( INCLUDE_vTaskSuspend == 1 )
\r
4943 if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
\r
4945 /* Add the task to the suspended task list instead of a delayed task
\r
4946 list to ensure it is not woken by a timing event. It will block
\r
4948 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) );
\r
4952 /* Calculate the time at which the task should be woken if the event
\r
4953 does not occur. This may overflow but this doesn't matter, the
\r
4954 kernel will manage it correctly. */
\r
4955 xTimeToWake = xConstTickCount + xTicksToWait;
\r
4957 /* The list item will be inserted in wake time order. */
\r
4958 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
\r
4960 if( xTimeToWake < xConstTickCount )
\r
4962 /* Wake time has overflowed. Place this item in the overflow
\r
4964 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
\r
4968 /* The wake time has not overflowed, so the current block list
\r
4970 vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
\r
4972 /* If the task entering the blocked state was placed at the
\r
4973 head of the list of blocked tasks then xNextTaskUnblockTime
\r
4974 needs to be updated too. */
\r
4975 if( xTimeToWake < xNextTaskUnblockTime )
\r
4977 xNextTaskUnblockTime = xTimeToWake;
\r
4981 mtCOVERAGE_TEST_MARKER();
\r
4986 #else /* INCLUDE_vTaskSuspend */
\r
4988 /* Calculate the time at which the task should be woken if the event
\r
4989 does not occur. This may overflow but this doesn't matter, the kernel
\r
4990 will manage it correctly. */
\r
4991 xTimeToWake = xConstTickCount + xTicksToWait;
\r
4993 /* The list item will be inserted in wake time order. */
\r
4994 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
\r
4996 if( xTimeToWake < xConstTickCount )
\r
4998 /* Wake time has overflowed. Place this item in the overflow list. */
\r
4999 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
\r
5003 /* The wake time has not overflowed, so the current block list is used. */
\r
5004 vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
\r
5006 /* If the task entering the blocked state was placed at the head of the
\r
5007 list of blocked tasks then xNextTaskUnblockTime needs to be updated
\r
5009 if( xTimeToWake < xNextTaskUnblockTime )
\r
5011 xNextTaskUnblockTime = xTimeToWake;
\r
5015 mtCOVERAGE_TEST_MARKER();
\r
5019 /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
\r
5020 ( void ) xCanBlockIndefinitely;
\r
5022 #endif /* INCLUDE_vTaskSuspend */
\r
5025 /* Code below here allows additional code to be inserted into this source file,
\r
5026 especially where access to file scope functions and data is needed (for example
\r
5027 when performing module tests). */
\r
5029 #ifdef FREERTOS_MODULE_TEST
\r
5030 #include "tasks_test_access_functions.h"
\r
5034 #if( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )
\r
5036 #include "freertos_tasks_c_additions.h"
\r
5038 static void freertos_tasks_c_additions_init( void )
\r
5040 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
\r
5041 FREERTOS_TASKS_C_ADDITIONS_INIT();
\r