2 * FreeRTOS Kernel V10.2.1
\r
3 * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
\r
5 * Permission is hereby granted, free of charge, to any person obtaining a copy of
\r
6 * this software and associated documentation files (the "Software"), to deal in
\r
7 * the Software without restriction, including without limitation the rights to
\r
8 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
\r
9 * the Software, and to permit persons to whom the Software is furnished to do so,
\r
10 * subject to the following conditions:
\r
12 * The above copyright notice and this permission notice shall be included in all
\r
13 * copies or substantial portions of the Software.
\r
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
\r
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
\r
17 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
\r
18 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
\r
19 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
\r
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
22 * http://www.FreeRTOS.org
\r
23 * http://aws.amazon.com/freertos
\r
25 * 1 tab == 4 spaces!
\r
28 /* Standard includes. */
\r
32 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
\r
33 all the API functions to use the MPU wrappers. That should only be done when
\r
34 task.h is included from an application file. */
\r
35 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
\r
37 /* FreeRTOS includes. */
\r
38 #include "FreeRTOS.h"
\r
41 #include "stack_macros.h"
\r
43 /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
\r
44 because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
\r
45 for the header files above, but not in this file, in order to generate the
\r
46 correct privileged Vs unprivileged linkage and placement. */
\r
47 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
\r
49 /* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
\r
50 functions but without including stdio.h here. */
\r
51 #if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
\r
52 /* At the bottom of this file are two optional functions that can be used
\r
53 to generate human readable text from the raw data generated by the
\r
54 uxTaskGetSystemState() function. Note the formatting functions are provided
\r
55 for convenience only, and are NOT considered part of the kernel. */
\r
57 #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
\r
59 #if( configUSE_PREEMPTION == 0 )
\r
60 /* If the cooperative scheduler is being used then a yield should not be
\r
61 performed just because a higher priority task has been woken. */
\r
62 #define taskYIELD_IF_USING_PREEMPTION()
\r
64 #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
\r
67 /* Values that can be assigned to the ucNotifyState member of the TCB. */
\r
68 #define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 )
\r
69 #define taskWAITING_NOTIFICATION ( ( uint8_t ) 1 )
\r
70 #define taskNOTIFICATION_RECEIVED ( ( uint8_t ) 2 )
\r
73 * The value used to fill the stack of a task when the task is created. This
\r
74 * is used purely for checking the high water mark for tasks.
\r
76 #define tskSTACK_FILL_BYTE ( 0xa5U )
\r
78 /* Bits used to record how a task's stack and TCB were allocated. */
\r
79 #define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )
\r
80 #define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )
\r
81 #define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )
\r
83 /* If any of the following are set then task stacks are filled with a known
\r
84 value so the high water mark can be determined. If none of the following are
\r
85 set then don't fill the stack so there is no unnecessary dependency on memset. */
\r
86 #if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
\r
87 #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 1
\r
89 #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 0
\r
93 * Macros used by vListTask to indicate which state a task is in.
\r
95 #define tskRUNNING_CHAR ( 'X' )
\r
96 #define tskBLOCKED_CHAR ( 'B' )
\r
97 #define tskREADY_CHAR ( 'R' )
\r
98 #define tskDELETED_CHAR ( 'D' )
\r
99 #define tskSUSPENDED_CHAR ( 'S' )
\r
102 * Some kernel aware debuggers require the data the debugger needs access to be
\r
103 * global, rather than file scope.
\r
105 #ifdef portREMOVE_STATIC_QUALIFIER
\r
109 /* The name allocated to the Idle task. This can be overridden by defining
\r
110 configIDLE_TASK_NAME in FreeRTOSConfig.h. */
\r
111 #ifndef configIDLE_TASK_NAME
\r
112 #define configIDLE_TASK_NAME "IDLE"
\r
115 #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
\r
117 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
\r
118 performed in a generic way that is not optimised to any particular
\r
119 microcontroller architecture. */
\r
121 /* uxTopReadyPriority holds the priority of the highest priority ready
\r
123 #define taskRECORD_READY_PRIORITY( uxPriority ) \
\r
125 if( ( uxPriority ) > uxTopReadyPriority ) \
\r
127 uxTopReadyPriority = ( uxPriority ); \
\r
129 } /* taskRECORD_READY_PRIORITY */
\r
131 /*-----------------------------------------------------------*/
\r
133 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
\r
135 UBaseType_t uxTopPriority = uxTopReadyPriority; \
\r
137 /* Find the highest priority queue that contains ready tasks. */ \
\r
138 while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \
\r
140 configASSERT( uxTopPriority ); \
\r
144 /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
\r
145 the same priority get an equal share of the processor time. */ \
\r
146 listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
\r
147 uxTopReadyPriority = uxTopPriority; \
\r
148 } /* taskSELECT_HIGHEST_PRIORITY_TASK */
\r
150 /*-----------------------------------------------------------*/
\r
152 /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
\r
153 they are only required when a port optimised method of task selection is
\r
155 #define taskRESET_READY_PRIORITY( uxPriority )
\r
156 #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )
\r
158 #else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
\r
160 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
\r
161 performed in a way that is tailored to the particular microcontroller
\r
162 architecture being used. */
\r
164 /* A port optimised version is provided. Call the port defined macros. */
\r
165 #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority )
\r
167 /*-----------------------------------------------------------*/
\r
169 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
\r
171 UBaseType_t uxTopPriority; \
\r
173 /* Find the highest priority list that contains ready tasks. */ \
\r
174 portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \
\r
175 configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
\r
176 listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
\r
177 } /* taskSELECT_HIGHEST_PRIORITY_TASK() */
\r
179 /*-----------------------------------------------------------*/
\r
181 /* A port optimised version is provided, call it only if the TCB being reset
\r
182 is being referenced from a ready list. If it is referenced from a delayed
\r
183 or suspended list then it won't be in a ready list. */
\r
184 #define taskRESET_READY_PRIORITY( uxPriority ) \
\r
186 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
\r
188 portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \
\r
192 #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
\r
194 /*-----------------------------------------------------------*/
\r
196 /* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
\r
197 count overflows. */
\r
198 #define taskSWITCH_DELAYED_LISTS() \
\r
202 /* The delayed tasks list should be empty when the lists are switched. */ \
\r
203 configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \
\r
205 pxTemp = pxDelayedTaskList; \
\r
206 pxDelayedTaskList = pxOverflowDelayedTaskList; \
\r
207 pxOverflowDelayedTaskList = pxTemp; \
\r
208 xNumOfOverflows++; \
\r
209 prvResetNextTaskUnblockTime(); \
\r
212 /*-----------------------------------------------------------*/
\r
215 * Place the task represented by pxTCB into the appropriate ready list for
\r
216 * the task. It is inserted at the end of the list.
\r
218 #define prvAddTaskToReadyList( pxTCB ) \
\r
219 traceMOVED_TASK_TO_READY_STATE( pxTCB ); \
\r
220 taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
\r
221 vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
\r
222 tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB )
\r
223 /*-----------------------------------------------------------*/
\r
226 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
\r
227 * where NULL is used to indicate that the handle of the currently executing
\r
228 * task should be used in place of the parameter. This macro simply checks to
\r
229 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
\r
231 #define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) )
\r
233 /* The item value of the event list item is normally used to hold the priority
\r
234 of the task to which it belongs (coded to allow it to be held in reverse
\r
235 priority order). However, it is occasionally borrowed for other purposes. It
\r
236 is important its value is not updated due to a task priority change while it is
\r
237 being used for another purpose. The following bit definition is used to inform
\r
238 the scheduler that the value should not be changed - in which case it is the
\r
239 responsibility of whichever module is using the value to ensure it gets set back
\r
240 to its original value when it is released. */
\r
241 #if( configUSE_16_BIT_TICKS == 1 )
\r
242 #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U
\r
244 #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL
\r
248 * Task control block. A task control block (TCB) is allocated for each task,
\r
249 * and stores task state information, including a pointer to the task's context
\r
250 * (the task's run time environment, including register values)
\r
252 typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */
\r
254 volatile StackType_t *pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
\r
256 #if ( portUSING_MPU_WRAPPERS == 1 )
\r
257 xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
\r
260 ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
\r
261 ListItem_t xEventListItem; /*< Used to reference a task from an event list. */
\r
262 UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */
\r
263 StackType_t *pxStack; /*< Points to the start of the stack. */
\r
264 char pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
266 #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
\r
267 StackType_t *pxEndOfStack; /*< Points to the highest valid address for the stack. */
\r
270 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
\r
271 UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
\r
274 #if ( configUSE_TRACE_FACILITY == 1 )
\r
275 UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
\r
276 UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
\r
279 #if ( configUSE_MUTEXES == 1 )
\r
280 UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
\r
281 UBaseType_t uxMutexesHeld;
\r
284 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
\r
285 TaskHookFunction_t pxTaskTag;
\r
288 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
\r
289 void *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
\r
292 #if( configGENERATE_RUN_TIME_STATS == 1 )
\r
293 uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
\r
296 #if ( configUSE_NEWLIB_REENTRANT == 1 )
\r
297 /* Allocate a Newlib reent structure that is specific to this task.
\r
298 Note Newlib support has been included by popular demand, but is not
\r
299 used by the FreeRTOS maintainers themselves. FreeRTOS is not
\r
300 responsible for resulting newlib operation. User must be familiar with
\r
301 newlib and must provide system-wide implementations of the necessary
\r
302 stubs. Be warned that (at the time of writing) the current newlib design
\r
303 implements a system-wide malloc() that must be provided with locks.
\r
305 See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
\r
306 for additional information. */
\r
307 struct _reent xNewLib_reent;
\r
310 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
311 volatile uint32_t ulNotifiedValue;
\r
312 volatile uint8_t ucNotifyState;
\r
315 /* See the comments in FreeRTOS.h with the definition of
\r
316 tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
\r
317 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
\r
318 uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
\r
321 #if( INCLUDE_xTaskAbortDelay == 1 )
\r
322 uint8_t ucDelayAborted;
\r
325 #if( configUSE_POSIX_ERRNO == 1 )
\r
331 /* The old tskTCB name is maintained above then typedefed to the new TCB_t name
\r
332 below to enable the use of older kernel aware debuggers. */
\r
333 typedef tskTCB TCB_t;
\r
335 /*lint -save -e956 A manual analysis and inspection has been used to determine
\r
336 which static variables must be declared volatile. */
\r
337 PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;
\r
339 /* Lists for ready and blocked tasks. --------------------
\r
340 xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
\r
341 doing so breaks some kernel aware debuggers and debuggers that rely on removing
\r
342 the static qualifier. */
\r
343 PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];/*< Prioritised ready tasks. */
\r
344 PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
\r
345 PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
\r
346 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
\r
347 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
\r
348 PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
\r
350 #if( INCLUDE_vTaskDelete == 1 )
\r
352 PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */
\r
353 PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
\r
357 #if ( INCLUDE_vTaskSuspend == 1 )
\r
359 PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */
\r
363 /* Global POSIX errno. Its value is changed upon context switching to match
\r
364 the errno of the currently running task. */
\r
365 #if ( configUSE_POSIX_ERRNO == 1 )
\r
366 int FreeRTOS_errno = 0;
\r
369 /* Other file private variables. --------------------------------*/
\r
370 PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
\r
371 PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
\r
372 PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
\r
373 PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
\r
374 PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U;
\r
375 PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE;
\r
376 PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
\r
377 PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
\r
378 PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
\r
379 PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
\r
381 /* Context switches are held pending while the scheduler is suspended. Also,
\r
382 interrupts must not manipulate the xStateListItem of a TCB, or any of the
\r
383 lists the xStateListItem can be referenced from, if the scheduler is suspended.
\r
384 If an interrupt needs to unblock a task while the scheduler is suspended then it
\r
385 moves the task's event list item into the xPendingReadyList, ready for the
\r
386 kernel to move the task from the pending ready list into the real ready list
\r
387 when the scheduler is unsuspended. The pending ready list itself can only be
\r
388 accessed from a critical section. */
\r
389 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE;
\r
391 #if ( configGENERATE_RUN_TIME_STATS == 1 )
\r
393 /* Do not move these variables to function scope as doing so prevents the
\r
394 code working with debuggers that need to remove the static qualifier. */
\r
395 PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */
\r
396 PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */
\r
402 /*-----------------------------------------------------------*/
\r
404 /* Callback function prototypes. --------------------------*/
\r
405 #if( configCHECK_FOR_STACK_OVERFLOW > 0 )
\r
407 extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );
\r
411 #if( configUSE_TICK_HOOK > 0 )
\r
413 extern void vApplicationTickHook( void ); /*lint !e526 Symbol not defined as it is an application callback. */
\r
417 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
\r
419 extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */
\r
423 /* File private functions. --------------------------------*/
\r
426 * Utility task that simply returns pdTRUE if the task referenced by xTask is
\r
427 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
\r
428 * is in any other state.
\r
430 #if ( INCLUDE_vTaskSuspend == 1 )
\r
432 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
\r
434 #endif /* INCLUDE_vTaskSuspend */
\r
437 * Utility to ready all the lists used by the scheduler. This is called
\r
438 * automatically upon the creation of the first task.
\r
440 static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
\r
443 * The idle task, which as all tasks is implemented as a never ending loop.
\r
444 * The idle task is automatically created and added to the ready lists upon
\r
445 * creation of the first user task.
\r
447 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
\r
448 * language extensions. The equivalent prototype for this function is:
\r
450 * void prvIdleTask( void *pvParameters );
\r
453 static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters );
\r
456 * Utility to free all memory allocated by the scheduler to hold a TCB,
\r
457 * including the stack pointed to by the TCB.
\r
459 * This does not free memory allocated by the task itself (i.e. memory
\r
460 * allocated by calls to pvPortMalloc from within the tasks application code).
\r
462 #if ( INCLUDE_vTaskDelete == 1 )
\r
464 static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION;
\r
469 * Used only by the idle task. This checks to see if anything has been placed
\r
470 * in the list of tasks waiting to be deleted. If so the task is cleaned up
\r
471 * and its TCB deleted.
\r
473 static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
\r
476 * The currently executing task is entering the Blocked state. Add the task to
\r
477 * either the current or the overflow delayed task list.
\r
479 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION;
\r
482 * Fills a TaskStatus_t structure with information on each task that is
\r
483 * referenced from the pxList list (which may be a ready list, a delayed list,
\r
484 * a suspended list, etc.).
\r
486 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
\r
487 * NORMAL APPLICATION CODE.
\r
489 #if ( configUSE_TRACE_FACILITY == 1 )
\r
491 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION;
\r
496 * Searches pxList for a task with name pcNameToQuery - returning a handle to
\r
497 * the task if it is found, or NULL if the task is not found.
\r
499 #if ( INCLUDE_xTaskGetHandle == 1 )
\r
501 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;
\r
506 * When a task is created, the stack of the task is filled with a known value.
\r
507 * This function determines the 'high water mark' of the task stack by
\r
508 * determining how much of the stack remains at the original preset value.
\r
510 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
\r
512 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
\r
517 * Return the amount of time, in ticks, that will pass before the kernel will
\r
518 * next move a task from the Blocked state to the Running state.
\r
520 * This conditional compilation should use inequality to 0, not equality to 1.
\r
521 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
\r
522 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
\r
523 * set to a value other than 1.
\r
525 #if ( configUSE_TICKLESS_IDLE != 0 )
\r
527 static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
\r
532 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
\r
533 * will exit the Blocked state.
\r
535 static void prvResetNextTaskUnblockTime( void );
\r
537 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
\r
540 * Helper function used to pad task names with spaces when printing out
\r
541 * human readable tables of task information.
\r
543 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) PRIVILEGED_FUNCTION;
\r
548 * Called after a Task_t structure has been allocated either statically or
\r
549 * dynamically to fill in the structure's members.
\r
551 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
\r
552 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
553 const uint32_t ulStackDepth,
\r
554 void * const pvParameters,
\r
555 UBaseType_t uxPriority,
\r
556 TaskHandle_t * const pxCreatedTask,
\r
558 const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;
\r
561 * Called after a new task has been created and initialised to place the task
\r
562 * under the control of the scheduler.
\r
564 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION;
\r
567 * freertos_tasks_c_additions_init() should only be called if the user definable
\r
568 * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
\r
569 * called by the function.
\r
571 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
\r
573 static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;
\r
577 /*-----------------------------------------------------------*/
\r
#if( configSUPPORT_STATIC_ALLOCATION == 1 )

	/*
	 * Create a task using caller-supplied memory for both the TCB and the
	 * stack.  Returns the handle of the created task, or NULL if either
	 * buffer parameter was NULL (when configASSERT is disabled).
	 *
	 * NOTE(review): the extracted source was missing this function's braces,
	 * local declarations, the else arm and the return statement; they are
	 * restored here to complete the visible logic.
	 */
	TaskHandle_t xTaskCreateStatic(	TaskFunction_t pxTaskCode,
									const char * const pcName,		/*lint !e971 Unqualified char types are allowed for strings and single characters only. */
									const uint32_t ulStackDepth,
									void * const pvParameters,
									UBaseType_t uxPriority,
									StackType_t * const puxStackBuffer,
									StaticTask_t * const pxTaskBuffer )
	{
	TCB_t *pxNewTCB;
	TaskHandle_t xReturn;

		configASSERT( puxStackBuffer != NULL );
		configASSERT( pxTaskBuffer != NULL );

		#if( configASSERT_DEFINED == 1 )
		{
			/* Sanity check that the size of the structure used to declare a
			variable of type StaticTask_t equals the size of the real task
			structure. */
			volatile size_t xSize = sizeof( StaticTask_t );
			configASSERT( xSize == sizeof( TCB_t ) );
			( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */
		}
		#endif /* configASSERT_DEFINED */

		if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
		{
			/* The memory used for the task's TCB and stack are passed into this
			function - use them. */
			pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
			pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;

			#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
			{
				/* Tasks can be created statically or dynamically, so note this
				task was created statically in case the task is later deleted. */
				pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
			}
			#endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

			/* prvInitialiseNewTask() writes the new task's handle into xReturn. */
			prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL );
			prvAddNewTaskToReadyList( pxNewTCB );
		}
		else
		{
			xReturn = NULL;
		}

		return xReturn;
	}

#endif /* SUPPORT_STATIC_ALLOCATION */
\r
634 /*-----------------------------------------------------------*/
\r
#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

	/*
	 * Create an MPU-restricted task using statically allocated TCB and stack
	 * buffers supplied through pxTaskDefinition.  Returns pdPASS on success,
	 * or errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY if either buffer is NULL.
	 *
	 * NOTE(review): braces, local declarations and the return statement were
	 * missing from the extracted source and are restored here.
	 */
	BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
	{
	TCB_t *pxNewTCB;
	BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

		configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
		configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );

		if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
		{
			/* Both the TCB and the stack are provided by the caller - no
			dynamic allocation is performed. */
			pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;

			/* Store the stack location in the TCB. */
			pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

			#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
			{
				/* Tasks can be created statically or dynamically, so note this
				task was created statically in case the task is later deleted. */
				pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
			}
			#endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

			prvInitialiseNewTask(	pxTaskDefinition->pvTaskCode,
									pxTaskDefinition->pcName,
									( uint32_t ) pxTaskDefinition->usStackDepth,
									pxTaskDefinition->pvParameters,
									pxTaskDefinition->uxPriority,
									pxCreatedTask, pxNewTCB,
									pxTaskDefinition->xRegions );

			prvAddNewTaskToReadyList( pxNewTCB );
			xReturn = pdPASS;
		}

		return xReturn;
	}

#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
\r
680 /*-----------------------------------------------------------*/
\r
#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

	/*
	 * Create an MPU-restricted task.  The stack is supplied by the caller
	 * through pxTaskDefinition; only the TCB is allocated dynamically.
	 * Returns pdPASS on success, or errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY if
	 * the stack buffer was NULL or the TCB allocation failed.
	 *
	 * NOTE(review): braces, local declarations and the return statement were
	 * missing from the extracted source and are restored here.
	 */
	BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
	{
	TCB_t *pxNewTCB;
	BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

		configASSERT( pxTaskDefinition->puxStackBuffer );

		if( pxTaskDefinition->puxStackBuffer != NULL )
		{
			/* Allocate space for the TCB.  Where the memory comes from depends
			on the implementation of the port malloc function and whether or
			not static allocation is being used. */
			pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );

			if( pxNewTCB != NULL )
			{
				/* Store the stack location in the TCB. */
				pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

				#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
				{
					/* Tasks can be created statically or dynamically, so note
					this task had a statically allocated stack in case it is
					later deleted.  The TCB was allocated dynamically. */
					pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
				}
				#endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

				prvInitialiseNewTask(	pxTaskDefinition->pvTaskCode,
										pxTaskDefinition->pcName,
										( uint32_t ) pxTaskDefinition->usStackDepth,
										pxTaskDefinition->pvParameters,
										pxTaskDefinition->uxPriority,
										pxCreatedTask, pxNewTCB,
										pxTaskDefinition->xRegions );

				prvAddNewTaskToReadyList( pxNewTCB );
				xReturn = pdPASS;
			}
		}

		return xReturn;
	}

#endif /* portUSING_MPU_WRAPPERS */
\r
729 /*-----------------------------------------------------------*/
\r
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

	/*
	 * Create a task with a dynamically allocated TCB and stack.  Returns
	 * pdPASS on success, or errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY if either
	 * allocation fails (in which case any partial allocation is freed).
	 *
	 * NOTE(review): braces, the pxNewTCB declaration, the allocation-failure
	 * else arms and the return statement were missing from the extracted
	 * source and are restored here.
	 */
	BaseType_t xTaskCreate(	TaskFunction_t pxTaskCode,
							const char * const pcName,		/*lint !e971 Unqualified char types are allowed for strings and single characters only. */
							const configSTACK_DEPTH_TYPE usStackDepth,
							void * const pvParameters,
							UBaseType_t uxPriority,
							TaskHandle_t * const pxCreatedTask )
	{
	TCB_t *pxNewTCB;
	BaseType_t xReturn;

		/* If the stack grows down then allocate the stack then the TCB so the
		stack does not grow into the TCB.  Likewise if the stack grows up then
		allocate the TCB then the stack. */
		#if( portSTACK_GROWTH > 0 )
		{
			/* Allocate space for the TCB.  Where the memory comes from depends
			on the implementation of the port malloc function and whether or
			not static allocation is being used. */
			pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );

			if( pxNewTCB != NULL )
			{
				/* Allocate space for the stack used by the task being created.
				The base of the stack memory stored in the TCB so the task can
				be deleted later if required. */
				pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

				if( pxNewTCB->pxStack == NULL )
				{
					/* Could not allocate the stack.  Delete the allocated TCB. */
					vPortFree( pxNewTCB );
					pxNewTCB = NULL;
				}
			}
		}
		#else /* portSTACK_GROWTH */
		{
		StackType_t *pxStack;

			/* Allocate space for the stack used by the task being created. */
			pxStack = pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */

			if( pxStack != NULL )
			{
				/* Allocate space for the TCB. */
				pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */

				if( pxNewTCB != NULL )
				{
					/* Store the stack location in the TCB. */
					pxNewTCB->pxStack = pxStack;
				}
				else
				{
					/* The stack cannot be used as the TCB was not created.
					Free it again. */
					vPortFree( pxStack );
				}
			}
			else
			{
				pxNewTCB = NULL;
			}
		}
		#endif /* portSTACK_GROWTH */

		if( pxNewTCB != NULL )
		{
			#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */
			{
				/* Tasks can be created statically or dynamically, so note this
				task was created dynamically in case it is later deleted. */
				pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
			}
			#endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

			prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
			prvAddNewTaskToReadyList( pxNewTCB );
			xReturn = pdPASS;
		}
		else
		{
			xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
		}

		return xReturn;
	}

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
\r
822 /*-----------------------------------------------------------*/
\r
824 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
\r
825 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
826 const uint32_t ulStackDepth,
\r
827 void * const pvParameters,
\r
828 UBaseType_t uxPriority,
\r
829 TaskHandle_t * const pxCreatedTask,
\r
831 const MemoryRegion_t * const xRegions )
\r
833 StackType_t *pxTopOfStack;
\r
836 #if( portUSING_MPU_WRAPPERS == 1 )
\r
837 /* Should the task be created in privileged mode? */
\r
838 BaseType_t xRunPrivileged;
\r
839 if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
\r
841 xRunPrivileged = pdTRUE;
\r
845 xRunPrivileged = pdFALSE;
\r
847 uxPriority &= ~portPRIVILEGE_BIT;
\r
848 #endif /* portUSING_MPU_WRAPPERS == 1 */
\r
850 /* Avoid dependency on memset() if it is not required. */
\r
851 #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
\r
853 /* Fill the stack with a known value to assist debugging. */
\r
854 ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
\r
856 #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */
\r
858 /* Calculate the top of stack address. This depends on whether the stack
\r
859 grows from high memory to low (as per the 80x86) or vice versa.
\r
860 portSTACK_GROWTH is used to make the result positive or negative as required
\r
862 #if( portSTACK_GROWTH < 0 )
\r
864 pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] );
\r
865 pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */
\r
867 /* Check the alignment of the calculated top of stack is correct. */
\r
868 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
\r
870 #if( configRECORD_STACK_HIGH_ADDRESS == 1 )
\r
872 /* Also record the stack's high address, which may assist
\r
874 pxNewTCB->pxEndOfStack = pxTopOfStack;
\r
876 #endif /* configRECORD_STACK_HIGH_ADDRESS */
\r
878 #else /* portSTACK_GROWTH */
\r
880 pxTopOfStack = pxNewTCB->pxStack;
\r
882 /* Check the alignment of the stack buffer is correct. */
\r
883 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
\r
885 /* The other extreme of the stack space is required if stack checking is
\r
887 pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
\r
889 #endif /* portSTACK_GROWTH */
\r
891 /* Store the task name in the TCB. */
\r
892 if( pcName != NULL )
\r
894 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
\r
896 pxNewTCB->pcTaskName[ x ] = pcName[ x ];
\r
898 /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
\r
899 configMAX_TASK_NAME_LEN characters just in case the memory after the
\r
900 string is not accessible (extremely unlikely). */
\r
901 if( pcName[ x ] == ( char ) 0x00 )
\r
907 mtCOVERAGE_TEST_MARKER();
\r
911 /* Ensure the name string is terminated in the case that the string length
\r
912 was greater or equal to configMAX_TASK_NAME_LEN. */
\r
913 pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
\r
917 /* The task has not been given a name, so just ensure there is a NULL
\r
918 terminator when it is read out. */
\r
919 pxNewTCB->pcTaskName[ 0 ] = 0x00;
\r
922 /* This is used as an array index so must ensure it's not too large. First
\r
923 remove the privilege bit if one is present. */
\r
924 if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
\r
926 uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
\r
930 mtCOVERAGE_TEST_MARKER();
\r
933 pxNewTCB->uxPriority = uxPriority;
\r
934 #if ( configUSE_MUTEXES == 1 )
\r
936 pxNewTCB->uxBasePriority = uxPriority;
\r
937 pxNewTCB->uxMutexesHeld = 0;
\r
939 #endif /* configUSE_MUTEXES */
\r
941 vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
\r
942 vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
\r
944 /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
\r
945 back to the containing TCB from a generic item in a list. */
\r
946 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );
\r
948 /* Event lists are always in priority order. */
\r
949 listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
950 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
\r
952 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
\r
954 pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U;
\r
956 #endif /* portCRITICAL_NESTING_IN_TCB */
\r
958 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
\r
960 pxNewTCB->pxTaskTag = NULL;
\r
962 #endif /* configUSE_APPLICATION_TASK_TAG */
\r
964 #if ( configGENERATE_RUN_TIME_STATS == 1 )
\r
966 pxNewTCB->ulRunTimeCounter = 0UL;
\r
968 #endif /* configGENERATE_RUN_TIME_STATS */
\r
970 #if ( portUSING_MPU_WRAPPERS == 1 )
\r
972 vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
\r
976 /* Avoid compiler warning about unreferenced parameter. */
\r
981 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
\r
983 for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
\r
985 pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL;
\r
990 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
\r
992 pxNewTCB->ulNotifiedValue = 0;
\r
993 pxNewTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
\r
997 #if ( configUSE_NEWLIB_REENTRANT == 1 )
\r
999 /* Initialise this task's Newlib reent structure.
\r
1000 See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
\r
1001 for additional information. */
\r
1002 _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) );
\r
1006 #if( INCLUDE_xTaskAbortDelay == 1 )
\r
1008 pxNewTCB->ucDelayAborted = pdFALSE;
\r
1012 /* Initialize the TCB stack to look as if the task was already running,
\r
1013 but had been interrupted by the scheduler. The return address is set
\r
1014 to the start of the task function. Once the stack has been initialised
\r
1015 the top of stack variable is updated. */
\r
1016 #if( portUSING_MPU_WRAPPERS == 1 )
\r
1018 /* If the port has capability to detect stack overflow,
\r
1019 pass the stack end address to the stack initialization
\r
1020 function as well. */
\r
1021 #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
\r
1023 #if( portSTACK_GROWTH < 0 )
\r
1025 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged );
\r
1027 #else /* portSTACK_GROWTH */
\r
1029 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged );
\r
1031 #endif /* portSTACK_GROWTH */
\r
1033 #else /* portHAS_STACK_OVERFLOW_CHECKING */
\r
1035 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
\r
1037 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
\r
1039 #else /* portUSING_MPU_WRAPPERS */
\r
1041 /* If the port has capability to detect stack overflow,
\r
1042 pass the stack end address to the stack initialization
\r
1043 function as well. */
\r
1044 #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
\r
1046 #if( portSTACK_GROWTH < 0 )
\r
1048 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
\r
1050 #else /* portSTACK_GROWTH */
\r
1052 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
\r
1054 #endif /* portSTACK_GROWTH */
\r
1056 #else /* portHAS_STACK_OVERFLOW_CHECKING */
\r
1058 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
\r
1060 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
\r
1062 #endif /* portUSING_MPU_WRAPPERS */
\r
1064 if( pxCreatedTask != NULL )
\r
1066 /* Pass the handle out in an anonymous way. The handle can be used to
\r
1067 change the created task's priority, delete the created task, etc.*/
\r
1068 *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
\r
1072 mtCOVERAGE_TEST_MARKER();
\r
1075 /*-----------------------------------------------------------*/
\r
1077 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB )
\r
1079 /* Ensure interrupts don't access the task lists while the lists are being
\r
1081 taskENTER_CRITICAL();
\r
1083 uxCurrentNumberOfTasks++;
\r
1084 if( pxCurrentTCB == NULL )
\r
1086 /* There are no other tasks, or all the other tasks are in
\r
1087 the suspended state - make this the current task. */
\r
1088 pxCurrentTCB = pxNewTCB;
\r
1090 if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
\r
1092 /* This is the first task to be created so do the preliminary
\r
1093 initialisation required. We will not recover if this call
\r
1094 fails, but we will report the failure. */
\r
1095 prvInitialiseTaskLists();
\r
1099 mtCOVERAGE_TEST_MARKER();
\r
1104 /* If the scheduler is not already running, make this task the
\r
1105 current task if it is the highest priority task to be created
\r
1107 if( xSchedulerRunning == pdFALSE )
\r
1109 if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority )
\r
1111 pxCurrentTCB = pxNewTCB;
\r
1115 mtCOVERAGE_TEST_MARKER();
\r
1120 mtCOVERAGE_TEST_MARKER();
\r
1126 #if ( configUSE_TRACE_FACILITY == 1 )
\r
1128 /* Add a counter into the TCB for tracing only. */
\r
1129 pxNewTCB->uxTCBNumber = uxTaskNumber;
\r
1131 #endif /* configUSE_TRACE_FACILITY */
\r
1132 traceTASK_CREATE( pxNewTCB );
\r
1134 prvAddTaskToReadyList( pxNewTCB );
\r
1136 portSETUP_TCB( pxNewTCB );
\r
1138 taskEXIT_CRITICAL();
\r
1140 if( xSchedulerRunning != pdFALSE )
\r
1142 /* If the created task is of a higher priority than the current task
\r
1143 then it should run now. */
\r
1144 if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority )
\r
1146 taskYIELD_IF_USING_PREEMPTION();
\r
1150 mtCOVERAGE_TEST_MARKER();
\r
1155 mtCOVERAGE_TEST_MARKER();
\r
1158 /*-----------------------------------------------------------*/
\r
1160 #if ( INCLUDE_vTaskDelete == 1 )
\r
1162 void vTaskDelete( TaskHandle_t xTaskToDelete )
\r
1166 taskENTER_CRITICAL();
\r
1168 /* If null is passed in here then it is the calling task that is
\r
1170 pxTCB = prvGetTCBFromHandle( xTaskToDelete );
\r
1172 /* Remove task from the ready/delayed list. */
\r
1173 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
1175 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
\r
1179 mtCOVERAGE_TEST_MARKER();
\r
1182 /* Is the task waiting on an event also? */
\r
1183 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
\r
1185 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
1189 mtCOVERAGE_TEST_MARKER();
\r
1192 /* Increment the uxTaskNumber also so kernel aware debuggers can
\r
1193 detect that the task lists need re-generating. This is done before
\r
1194 portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
\r
1198 if( pxTCB == pxCurrentTCB )
\r
1200 /* A task is deleting itself. This cannot complete within the
\r
1201 task itself, as a context switch to another task is required.
\r
1202 Place the task in the termination list. The idle task will
\r
1203 check the termination list and free up any memory allocated by
\r
1204 the scheduler for the TCB and stack of the deleted task. */
\r
1205 vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );
\r
1207 /* Increment the ucTasksDeleted variable so the idle task knows
\r
1208 there is a task that has been deleted and that it should therefore
\r
1209 check the xTasksWaitingTermination list. */
\r
1210 ++uxDeletedTasksWaitingCleanUp;
\r
1212 /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
\r
1213 portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
\r
1214 traceTASK_DELETE( pxTCB );
\r
1216 /* The pre-delete hook is primarily for the Windows simulator,
\r
1217 in which Windows specific clean up operations are performed,
\r
1218 after which it is not possible to yield away from this task -
\r
1219 hence xYieldPending is used to latch that a context switch is
\r
1221 portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
\r
1225 --uxCurrentNumberOfTasks;
\r
1226 traceTASK_DELETE( pxTCB );
\r
1227 prvDeleteTCB( pxTCB );
\r
1229 /* Reset the next expected unblock time in case it referred to
\r
1230 the task that has just been deleted. */
\r
1231 prvResetNextTaskUnblockTime();
\r
1234 taskEXIT_CRITICAL();
\r
1236 /* Force a reschedule if it is the currently running task that has just
\r
1238 if( xSchedulerRunning != pdFALSE )
\r
1240 if( pxTCB == pxCurrentTCB )
\r
1242 configASSERT( uxSchedulerSuspended == 0 );
\r
1243 portYIELD_WITHIN_API();
\r
1247 mtCOVERAGE_TEST_MARKER();
\r
1252 #endif /* INCLUDE_vTaskDelete */
\r
1253 /*-----------------------------------------------------------*/
\r
1255 #if ( INCLUDE_vTaskDelayUntil == 1 )
\r
1257 void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement )
\r
1259 TickType_t xTimeToWake;
\r
1260 BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
\r
1262 configASSERT( pxPreviousWakeTime );
\r
1263 configASSERT( ( xTimeIncrement > 0U ) );
\r
1264 configASSERT( uxSchedulerSuspended == 0 );
\r
1266 vTaskSuspendAll();
\r
1268 /* Minor optimisation. The tick count cannot change in this
\r
1270 const TickType_t xConstTickCount = xTickCount;
\r
1272 /* Generate the tick time at which the task wants to wake. */
\r
1273 xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
\r
1275 if( xConstTickCount < *pxPreviousWakeTime )
\r
1277 /* The tick count has overflowed since this function was
\r
1278 lasted called. In this case the only time we should ever
\r
1279 actually delay is if the wake time has also overflowed,
\r
1280 and the wake time is greater than the tick time. When this
\r
1281 is the case it is as if neither time had overflowed. */
\r
1282 if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
\r
1284 xShouldDelay = pdTRUE;
\r
1288 mtCOVERAGE_TEST_MARKER();
\r
1293 /* The tick time has not overflowed. In this case we will
\r
1294 delay if either the wake time has overflowed, and/or the
\r
1295 tick time is less than the wake time. */
\r
1296 if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
\r
1298 xShouldDelay = pdTRUE;
\r
1302 mtCOVERAGE_TEST_MARKER();
\r
1306 /* Update the wake time ready for the next call. */
\r
1307 *pxPreviousWakeTime = xTimeToWake;
\r
1309 if( xShouldDelay != pdFALSE )
\r
1311 traceTASK_DELAY_UNTIL( xTimeToWake );
\r
1313 /* prvAddCurrentTaskToDelayedList() needs the block time, not
\r
1314 the time to wake, so subtract the current tick count. */
\r
1315 prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
\r
1319 mtCOVERAGE_TEST_MARKER();
\r
1322 xAlreadyYielded = xTaskResumeAll();
\r
1324 /* Force a reschedule if xTaskResumeAll has not already done so, we may
\r
1325 have put ourselves to sleep. */
\r
1326 if( xAlreadyYielded == pdFALSE )
\r
1328 portYIELD_WITHIN_API();
\r
1332 mtCOVERAGE_TEST_MARKER();
\r
1336 #endif /* INCLUDE_vTaskDelayUntil */
\r
1337 /*-----------------------------------------------------------*/
\r
1339 #if ( INCLUDE_vTaskDelay == 1 )
\r
1341 void vTaskDelay( const TickType_t xTicksToDelay )
\r
1343 BaseType_t xAlreadyYielded = pdFALSE;
\r
1345 /* A delay time of zero just forces a reschedule. */
\r
1346 if( xTicksToDelay > ( TickType_t ) 0U )
\r
1348 configASSERT( uxSchedulerSuspended == 0 );
\r
1349 vTaskSuspendAll();
\r
1351 traceTASK_DELAY();
\r
1353 /* A task that is removed from the event list while the
\r
1354 scheduler is suspended will not get placed in the ready
\r
1355 list or removed from the blocked list until the scheduler
\r
1358 This task cannot be in an event list as it is the currently
\r
1359 executing task. */
\r
1360 prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
\r
1362 xAlreadyYielded = xTaskResumeAll();
\r
1366 mtCOVERAGE_TEST_MARKER();
\r
1369 /* Force a reschedule if xTaskResumeAll has not already done so, we may
\r
1370 have put ourselves to sleep. */
\r
1371 if( xAlreadyYielded == pdFALSE )
\r
1373 portYIELD_WITHIN_API();
\r
1377 mtCOVERAGE_TEST_MARKER();
\r
1381 #endif /* INCLUDE_vTaskDelay */
\r
1382 /*-----------------------------------------------------------*/
\r
1384 #if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )
\r
1386 eTaskState eTaskGetState( TaskHandle_t xTask )
\r
1388 eTaskState eReturn;
\r
1389 List_t const * pxStateList, *pxDelayedList, *pxOverflowedDelayedList;
\r
1390 const TCB_t * const pxTCB = xTask;
\r
1392 configASSERT( pxTCB );
\r
1394 if( pxTCB == pxCurrentTCB )
\r
1396 /* The task calling this function is querying its own state. */
\r
1397 eReturn = eRunning;
\r
1401 taskENTER_CRITICAL();
\r
1403 pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
\r
1404 pxDelayedList = pxDelayedTaskList;
\r
1405 pxOverflowedDelayedList = pxOverflowDelayedTaskList;
\r
1407 taskEXIT_CRITICAL();
\r
1409 if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
\r
1411 /* The task being queried is referenced from one of the Blocked
\r
1413 eReturn = eBlocked;
\r
1416 #if ( INCLUDE_vTaskSuspend == 1 )
\r
1417 else if( pxStateList == &xSuspendedTaskList )
\r
1419 /* The task being queried is referenced from the suspended
\r
1420 list. Is it genuinely suspended or is it blocked
\r
1422 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
\r
1424 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
1426 /* The task does not appear on the event list item of
\r
1427 and of the RTOS objects, but could still be in the
\r
1428 blocked state if it is waiting on its notification
\r
1429 rather than waiting on an object. */
\r
1430 if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
\r
1432 eReturn = eBlocked;
\r
1436 eReturn = eSuspended;
\r
1441 eReturn = eSuspended;
\r
1447 eReturn = eBlocked;
\r
1452 #if ( INCLUDE_vTaskDelete == 1 )
\r
1453 else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
\r
1455 /* The task being queried is referenced from the deleted
\r
1456 tasks list, or it is not referenced from any lists at
\r
1458 eReturn = eDeleted;
\r
1462 else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
\r
1464 /* If the task is not in any other state, it must be in the
\r
1465 Ready (including pending ready) state. */
\r
1471 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
\r
1473 #endif /* INCLUDE_eTaskGetState */
\r
1474 /*-----------------------------------------------------------*/
\r
1476 #if ( INCLUDE_uxTaskPriorityGet == 1 )
\r
1478 UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
\r
1480 TCB_t const *pxTCB;
\r
1481 UBaseType_t uxReturn;
\r
1483 taskENTER_CRITICAL();
\r
1485 /* If null is passed in here then it is the priority of the task
\r
1486 that called uxTaskPriorityGet() that is being queried. */
\r
1487 pxTCB = prvGetTCBFromHandle( xTask );
\r
1488 uxReturn = pxTCB->uxPriority;
\r
1490 taskEXIT_CRITICAL();
\r
1495 #endif /* INCLUDE_uxTaskPriorityGet */
\r
1496 /*-----------------------------------------------------------*/
\r
1498 #if ( INCLUDE_uxTaskPriorityGet == 1 )
\r
1500 UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
\r
1502 TCB_t const *pxTCB;
\r
1503 UBaseType_t uxReturn, uxSavedInterruptState;
\r
1505 /* RTOS ports that support interrupt nesting have the concept of a
\r
1506 maximum system call (or maximum API call) interrupt priority.
\r
1507 Interrupts that are above the maximum system call priority are keep
\r
1508 permanently enabled, even when the RTOS kernel is in a critical section,
\r
1509 but cannot make any calls to FreeRTOS API functions. If configASSERT()
\r
1510 is defined in FreeRTOSConfig.h then
\r
1511 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1512 failure if a FreeRTOS API function is called from an interrupt that has
\r
1513 been assigned a priority above the configured maximum system call
\r
1514 priority. Only FreeRTOS functions that end in FromISR can be called
\r
1515 from interrupts that have been assigned a priority at or (logically)
\r
1516 below the maximum system call interrupt priority. FreeRTOS maintains a
\r
1517 separate interrupt safe API to ensure interrupt entry is as fast and as
\r
1518 simple as possible. More information (albeit Cortex-M specific) is
\r
1519 provided on the following link:
\r
1520 https://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1521 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1523 uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1525 /* If null is passed in here then it is the priority of the calling
\r
1526 task that is being queried. */
\r
1527 pxTCB = prvGetTCBFromHandle( xTask );
\r
1528 uxReturn = pxTCB->uxPriority;
\r
1530 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState );
\r
1535 #endif /* INCLUDE_uxTaskPriorityGet */
\r
1536 /*-----------------------------------------------------------*/
\r
1538 #if ( INCLUDE_vTaskPrioritySet == 1 )
\r
1540 void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority )
\r
1543 UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
\r
1544 BaseType_t xYieldRequired = pdFALSE;
\r
1546 configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );
\r
1548 /* Ensure the new priority is valid. */
\r
1549 if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
\r
1551 uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
\r
1555 mtCOVERAGE_TEST_MARKER();
\r
1558 taskENTER_CRITICAL();
\r
1560 /* If null is passed in here then it is the priority of the calling
\r
1561 task that is being changed. */
\r
1562 pxTCB = prvGetTCBFromHandle( xTask );
\r
1564 traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );
\r
1566 #if ( configUSE_MUTEXES == 1 )
\r
1568 uxCurrentBasePriority = pxTCB->uxBasePriority;
\r
1572 uxCurrentBasePriority = pxTCB->uxPriority;
\r
1576 if( uxCurrentBasePriority != uxNewPriority )
\r
1578 /* The priority change may have readied a task of higher
\r
1579 priority than the calling task. */
\r
1580 if( uxNewPriority > uxCurrentBasePriority )
\r
1582 if( pxTCB != pxCurrentTCB )
\r
1584 /* The priority of a task other than the currently
\r
1585 running task is being raised. Is the priority being
\r
1586 raised above that of the running task? */
\r
1587 if( uxNewPriority >= pxCurrentTCB->uxPriority )
\r
1589 xYieldRequired = pdTRUE;
\r
1593 mtCOVERAGE_TEST_MARKER();
\r
1598 /* The priority of the running task is being raised,
\r
1599 but the running task must already be the highest
\r
1600 priority task able to run so no yield is required. */
\r
1603 else if( pxTCB == pxCurrentTCB )
\r
1605 /* Setting the priority of the running task down means
\r
1606 there may now be another task of higher priority that
\r
1607 is ready to execute. */
\r
1608 xYieldRequired = pdTRUE;
\r
1612 /* Setting the priority of any other task down does not
\r
1613 require a yield as the running task must be above the
\r
1614 new priority of the task being modified. */
\r
1617 /* Remember the ready list the task might be referenced from
\r
1618 before its uxPriority member is changed so the
\r
1619 taskRESET_READY_PRIORITY() macro can function correctly. */
\r
1620 uxPriorityUsedOnEntry = pxTCB->uxPriority;
\r
1622 #if ( configUSE_MUTEXES == 1 )
\r
1624 /* Only change the priority being used if the task is not
\r
1625 currently using an inherited priority. */
\r
1626 if( pxTCB->uxBasePriority == pxTCB->uxPriority )
\r
1628 pxTCB->uxPriority = uxNewPriority;
\r
1632 mtCOVERAGE_TEST_MARKER();
\r
1635 /* The base priority gets set whatever. */
\r
1636 pxTCB->uxBasePriority = uxNewPriority;
\r
1640 pxTCB->uxPriority = uxNewPriority;
\r
1644 /* Only reset the event list item value if the value is not
\r
1645 being used for anything else. */
\r
1646 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
\r
1648 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
1652 mtCOVERAGE_TEST_MARKER();
\r
1655 /* If the task is in the blocked or suspended list we need do
\r
1656 nothing more than change its priority variable. However, if
\r
1657 the task is in a ready list it needs to be removed and placed
\r
1658 in the list appropriate to its new priority. */
\r
1659 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
\r
1661 /* The task is currently in its ready list - remove before
\r
1662 adding it to it's new ready list. As we are in a critical
\r
1663 section we can do this even if the scheduler is suspended. */
\r
1664 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
1666 /* It is known that the task is in its ready list so
\r
1667 there is no need to check again and the port level
\r
1668 reset macro can be called directly. */
\r
1669 portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
\r
1673 mtCOVERAGE_TEST_MARKER();
\r
1675 prvAddTaskToReadyList( pxTCB );
\r
1679 mtCOVERAGE_TEST_MARKER();
\r
1682 if( xYieldRequired != pdFALSE )
\r
1684 taskYIELD_IF_USING_PREEMPTION();
\r
1688 mtCOVERAGE_TEST_MARKER();
\r
1691 /* Remove compiler warning about unused variables when the port
\r
1692 optimised task selection is not being used. */
\r
1693 ( void ) uxPriorityUsedOnEntry;
\r
1696 taskEXIT_CRITICAL();
\r
1699 #endif /* INCLUDE_vTaskPrioritySet */
\r
1700 /*-----------------------------------------------------------*/
\r
1702 #if ( INCLUDE_vTaskSuspend == 1 )
\r
1704 void vTaskSuspend( TaskHandle_t xTaskToSuspend )
\r
1708 taskENTER_CRITICAL();
\r
1710 /* If null is passed in here then it is the running task that is
\r
1711 being suspended. */
\r
1712 pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
\r
1714 traceTASK_SUSPEND( pxTCB );
\r
1716 /* Remove task from the ready/delayed list and place in the
\r
1717 suspended list. */
\r
1718 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
1720 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
\r
1724 mtCOVERAGE_TEST_MARKER();
\r
1727 /* Is the task waiting on an event also? */
\r
1728 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
\r
1730 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
1734 mtCOVERAGE_TEST_MARKER();
\r
1737 vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
\r
1739 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
1741 if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
\r
1743 /* The task was blocked to wait for a notification, but is
\r
1744 now suspended, so no notification was received. */
\r
1745 pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
\r
1750 taskEXIT_CRITICAL();
\r
1752 if( xSchedulerRunning != pdFALSE )
\r
1754 /* Reset the next expected unblock time in case it referred to the
\r
1755 task that is now in the Suspended state. */
\r
1756 taskENTER_CRITICAL();
\r
1758 prvResetNextTaskUnblockTime();
\r
1760 taskEXIT_CRITICAL();
\r
1764 mtCOVERAGE_TEST_MARKER();
\r
1767 if( pxTCB == pxCurrentTCB )
\r
1769 if( xSchedulerRunning != pdFALSE )
\r
1771 /* The current task has just been suspended. */
\r
1772 configASSERT( uxSchedulerSuspended == 0 );
\r
1773 portYIELD_WITHIN_API();
\r
1777 /* The scheduler is not running, but the task that was pointed
\r
1778 to by pxCurrentTCB has just been suspended and pxCurrentTCB
\r
1779 must be adjusted to point to a different task. */
\r
1780 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
\r
1782 /* No other tasks are ready, so set pxCurrentTCB back to
\r
1783 NULL so when the next task is created pxCurrentTCB will
\r
1784 be set to point to it no matter what its relative priority
\r
1786 pxCurrentTCB = NULL;
\r
1790 vTaskSwitchContext();
\r
1796 mtCOVERAGE_TEST_MARKER();
\r
1800 #endif /* INCLUDE_vTaskSuspend */
\r
1801 /*-----------------------------------------------------------*/
\r
1803 #if ( INCLUDE_vTaskSuspend == 1 )
\r
1805 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
\r
1807 BaseType_t xReturn = pdFALSE;
\r
1808 const TCB_t * const pxTCB = xTask;
\r
1810 /* Accesses xPendingReadyList so must be called from a critical
\r
1813 /* It does not make sense to check if the calling task is suspended. */
\r
1814 configASSERT( xTask );
\r
1816 /* Is the task being resumed actually in the suspended list? */
\r
1817 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
\r
1819 /* Has the task already been resumed from within an ISR? */
\r
1820 if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
\r
1822 /* Is it in the suspended list because it is in the Suspended
\r
1823 state, or because is is blocked with no timeout? */
\r
1824 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */
\r
1830 mtCOVERAGE_TEST_MARKER();
\r
1835 mtCOVERAGE_TEST_MARKER();
\r
1840 mtCOVERAGE_TEST_MARKER();
\r
1844 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
\r
1846 #endif /* INCLUDE_vTaskSuspend */
\r
1847 /*-----------------------------------------------------------*/
\r
1849 #if ( INCLUDE_vTaskSuspend == 1 )
\r
1851 void vTaskResume( TaskHandle_t xTaskToResume )
\r
1853 TCB_t * const pxTCB = xTaskToResume;
\r
1855 /* It does not make sense to resume the calling task. */
\r
1856 configASSERT( xTaskToResume );
\r
1858 /* The parameter cannot be NULL as it is impossible to resume the
\r
1859 currently executing task. */
\r
1860 if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) )
\r
1862 taskENTER_CRITICAL();
\r
1864 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
\r
1866 traceTASK_RESUME( pxTCB );
\r
1868 /* The ready list can be accessed even if the scheduler is
\r
1869 suspended because this is inside a critical section. */
\r
1870 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
1871 prvAddTaskToReadyList( pxTCB );
\r
1873 /* A higher priority task may have just been resumed. */
\r
1874 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
\r
1876 /* This yield may not cause the task just resumed to run,
\r
1877 but will leave the lists in the correct state for the
\r
1879 taskYIELD_IF_USING_PREEMPTION();
\r
1883 mtCOVERAGE_TEST_MARKER();
\r
1888 mtCOVERAGE_TEST_MARKER();
\r
1891 taskEXIT_CRITICAL();
\r
1895 mtCOVERAGE_TEST_MARKER();
\r
1899 #endif /* INCLUDE_vTaskSuspend */
\r
1901 /*-----------------------------------------------------------*/
\r
1903 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
\r
1905 BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
\r
1907 BaseType_t xYieldRequired = pdFALSE;
\r
1908 TCB_t * const pxTCB = xTaskToResume;
\r
1909 UBaseType_t uxSavedInterruptStatus;
\r
1911 configASSERT( xTaskToResume );
\r
1913 /* RTOS ports that support interrupt nesting have the concept of a
\r
1914 maximum system call (or maximum API call) interrupt priority.
\r
1915 Interrupts that are above the maximum system call priority are keep
\r
1916 permanently enabled, even when the RTOS kernel is in a critical section,
\r
1917 but cannot make any calls to FreeRTOS API functions. If configASSERT()
\r
1918 is defined in FreeRTOSConfig.h then
\r
1919 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1920 failure if a FreeRTOS API function is called from an interrupt that has
\r
1921 been assigned a priority above the configured maximum system call
\r
1922 priority. Only FreeRTOS functions that end in FromISR can be called
\r
1923 from interrupts that have been assigned a priority at or (logically)
\r
1924 below the maximum system call interrupt priority. FreeRTOS maintains a
\r
1925 separate interrupt safe API to ensure interrupt entry is as fast and as
\r
1926 simple as possible. More information (albeit Cortex-M specific) is
\r
1927 provided on the following link:
\r
1928 https://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1929 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1931 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1933 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
\r
1935 traceTASK_RESUME_FROM_ISR( pxTCB );
\r
1937 /* Check the ready lists can be accessed. */
\r
1938 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
1940 /* Ready lists can be accessed so move the task from the
\r
1941 suspended list to the ready list directly. */
\r
1942 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
\r
1944 xYieldRequired = pdTRUE;
\r
1948 mtCOVERAGE_TEST_MARKER();
\r
1951 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
1952 prvAddTaskToReadyList( pxTCB );
\r
1956 /* The delayed or ready lists cannot be accessed so the task
\r
1957 is held in the pending ready list until the scheduler is
\r
1959 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
\r
1964 mtCOVERAGE_TEST_MARKER();
\r
1967 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1969 return xYieldRequired;
\r
1972 #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
\r
1973 /*-----------------------------------------------------------*/
\r
1975 void vTaskStartScheduler( void )
\r
1977 BaseType_t xReturn;
\r
1979 /* Add the idle task at the lowest priority. */
\r
1980 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
\r
1982 StaticTask_t *pxIdleTaskTCBBuffer = NULL;
\r
1983 StackType_t *pxIdleTaskStackBuffer = NULL;
\r
1984 uint32_t ulIdleTaskStackSize;
\r
1986 /* The Idle task is created using user provided RAM - obtain the
\r
1987 address of the RAM then create the idle task. */
\r
1988 vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
\r
1989 xIdleTaskHandle = xTaskCreateStatic( prvIdleTask,
\r
1990 configIDLE_TASK_NAME,
\r
1991 ulIdleTaskStackSize,
\r
1992 ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */
\r
1993 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
\r
1994 pxIdleTaskStackBuffer,
\r
1995 pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
\r
1997 if( xIdleTaskHandle != NULL )
\r
2008 /* The Idle task is being created using dynamically allocated RAM. */
\r
2009 xReturn = xTaskCreate( prvIdleTask,
\r
2010 configIDLE_TASK_NAME,
\r
2011 configMINIMAL_STACK_SIZE,
\r
2013 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
\r
2014 &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
\r
2016 #endif /* configSUPPORT_STATIC_ALLOCATION */
\r
2018 #if ( configUSE_TIMERS == 1 )
\r
2020 if( xReturn == pdPASS )
\r
2022 xReturn = xTimerCreateTimerTask();
\r
2026 mtCOVERAGE_TEST_MARKER();
\r
2029 #endif /* configUSE_TIMERS */
\r
2031 if( xReturn == pdPASS )
\r
2033 /* freertos_tasks_c_additions_init() should only be called if the user
\r
2034 definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
\r
2035 the only macro called by the function. */
\r
2036 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
\r
2038 freertos_tasks_c_additions_init();
\r
2042 /* Interrupts are turned off here, to ensure a tick does not occur
\r
2043 before or during the call to xPortStartScheduler(). The stacks of
\r
2044 the created tasks contain a status word with interrupts switched on
\r
2045 so interrupts will automatically get re-enabled when the first task
\r
2047 portDISABLE_INTERRUPTS();
\r
2049 #if ( configUSE_NEWLIB_REENTRANT == 1 )
\r
2051 /* Switch Newlib's _impure_ptr variable to point to the _reent
\r
2052 structure specific to the task that will run first.
\r
2053 See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
\r
2054 for additional information. */
\r
2055 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
\r
2057 #endif /* configUSE_NEWLIB_REENTRANT */
\r
2059 xNextTaskUnblockTime = portMAX_DELAY;
\r
2060 xSchedulerRunning = pdTRUE;
\r
2061 xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
\r
2063 /* If configGENERATE_RUN_TIME_STATS is defined then the following
\r
2064 macro must be defined to configure the timer/counter used to generate
\r
2065 the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS
\r
2066 is set to 0 and the following line fails to build then ensure you do not
\r
2067 have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
\r
2068 FreeRTOSConfig.h file. */
\r
2069 portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
\r
2071 traceTASK_SWITCHED_IN();
\r
2073 /* Setting up the timer tick is hardware specific and thus in the
\r
2074 portable interface. */
\r
2075 if( xPortStartScheduler() != pdFALSE )
\r
2077 /* Should not reach here as if the scheduler is running the
\r
2078 function will not return. */
\r
2082 /* Should only reach here if a task calls xTaskEndScheduler(). */
\r
2087 /* This line will only be reached if the kernel could not be started,
\r
2088 because there was not enough FreeRTOS heap to create the idle task
\r
2089 or the timer task. */
\r
2090 configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
\r
2093 /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
\r
2094 meaning xIdleTaskHandle is not used anywhere else. */
\r
2095 ( void ) xIdleTaskHandle;
\r
2097 /*-----------------------------------------------------------*/
\r
2099 void vTaskEndScheduler( void )
\r
2101 /* Stop the scheduler interrupts and call the portable scheduler end
\r
2102 routine so the original ISRs can be restored if necessary. The port
\r
2103 layer must ensure interrupts enable bit is left in the correct state. */
\r
2104 portDISABLE_INTERRUPTS();
\r
2105 xSchedulerRunning = pdFALSE;
\r
2106 vPortEndScheduler();
\r
2108 /*----------------------------------------------------------*/
\r
2110 void vTaskSuspendAll( void )
\r
2112 /* A critical section is not required as the variable is of type
\r
2113 BaseType_t. Please read Richard Barry's reply in the following link to a
\r
2114 post in the FreeRTOS support forum before reporting this as a bug! -
\r
2115 http://goo.gl/wu4acr */
\r
2116 ++uxSchedulerSuspended;
\r
2117 portMEMORY_BARRIER();
\r
2119 /*----------------------------------------------------------*/
\r
2121 #if ( configUSE_TICKLESS_IDLE != 0 )
\r
2123 static TickType_t prvGetExpectedIdleTime( void )
\r
2125 TickType_t xReturn;
\r
2126 UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
\r
2128 /* uxHigherPriorityReadyTasks takes care of the case where
\r
2129 configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
\r
2130 task that are in the Ready state, even though the idle task is
\r
2132 #if( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
\r
2134 if( uxTopReadyPriority > tskIDLE_PRIORITY )
\r
2136 uxHigherPriorityReadyTasks = pdTRUE;
\r
2141 const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;
\r
2143 /* When port optimised task selection is used the uxTopReadyPriority
\r
2144 variable is used as a bit map. If bits other than the least
\r
2145 significant bit are set then there are tasks that have a priority
\r
2146 above the idle priority that are in the Ready state. This takes
\r
2147 care of the case where the co-operative scheduler is in use. */
\r
2148 if( uxTopReadyPriority > uxLeastSignificantBit )
\r
2150 uxHigherPriorityReadyTasks = pdTRUE;
\r
2155 if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY )
\r
2159 else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 )
\r
2161 /* There are other idle priority tasks in the ready state. If
\r
2162 time slicing is used then the very next tick interrupt must be
\r
2166 else if( uxHigherPriorityReadyTasks != pdFALSE )
\r
2168 /* There are tasks in the Ready state that have a priority above the
\r
2169 idle priority. This path can only be reached if
\r
2170 configUSE_PREEMPTION is 0. */
\r
2175 xReturn = xNextTaskUnblockTime - xTickCount;
\r
2181 #endif /* configUSE_TICKLESS_IDLE */
\r
2182 /*----------------------------------------------------------*/
\r
2184 BaseType_t xTaskResumeAll( void )
\r
2186 TCB_t *pxTCB = NULL;
\r
2187 BaseType_t xAlreadyYielded = pdFALSE;
\r
2188 TickType_t xTicksToNextUnblockTime;
\r
2190 /* If uxSchedulerSuspended is zero then this function does not match a
\r
2191 previous call to vTaskSuspendAll(). */
\r
2192 configASSERT( uxSchedulerSuspended );
\r
2194 /* It is possible that an ISR caused a task to be removed from an event
\r
2195 list while the scheduler was suspended. If this was the case then the
\r
2196 removed task will have been added to the xPendingReadyList. Once the
\r
2197 scheduler has been resumed it is safe to move all the pending ready
\r
2198 tasks from this list into their appropriate ready list. */
\r
2199 taskENTER_CRITICAL();
\r
2201 --uxSchedulerSuspended;
\r
2203 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
2205 if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
\r
2207 /* Move any readied tasks from the pending list into the
\r
2208 appropriate ready list. */
\r
2209 while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
\r
2211 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
2212 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
2213 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
2214 prvAddTaskToReadyList( pxTCB );
\r
2216 /* If the moved task has a priority higher than the current
\r
2217 task then a yield must be performed. */
\r
2218 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
\r
2220 xYieldPending = pdTRUE;
\r
2224 mtCOVERAGE_TEST_MARKER();
\r
2228 if( pxTCB != NULL )
\r
2230 /* A task was unblocked while the scheduler was suspended,
\r
2231 which may have prevented the next unblock time from being
\r
2232 re-calculated, in which case re-calculate it now. Mainly
\r
2233 important for low power tickless implementations, where
\r
2234 this can prevent an unnecessary exit from low power
\r
2236 prvResetNextTaskUnblockTime();
\r
2239 /* If any ticks occurred while the scheduler was suspended then
\r
2240 they should be processed now. This ensures the tick count does
\r
2241 not slip, and that any delayed tasks are resumed at the correct
\r
2243 while( xPendedTicks > ( TickType_t ) 0 )
\r
2245 /* Calculate how far into the future the next task will
\r
2246 leave the Blocked state because its timeout expired. If
\r
2247 there are no tasks due to leave the blocked state between
\r
2248 the time now and the time at which the tick count overflows
\r
2249 then xNextTaskUnblockTime will the tick overflow time.
\r
2250 This means xNextTaskUnblockTime can never be less than
\r
2251 xTickCount, and the following can therefore not
\r
2253 configASSERT( xNextTaskUnblockTime >= xTickCount );
\r
2254 xTicksToNextUnblockTime = xNextTaskUnblockTime - xTickCount;
\r
2256 /* Don't want to move the tick count more than the number
\r
2257 of ticks that are pending, so cap if necessary. */
\r
2258 if( xTicksToNextUnblockTime > xPendedTicks )
\r
2260 xTicksToNextUnblockTime = xPendedTicks;
\r
2263 if( xTicksToNextUnblockTime == 0 )
\r
2265 /* xTicksToNextUnblockTime could be zero if the tick
\r
2266 count is about to overflow and xTicksToNetUnblockTime
\r
2267 holds the time at which the tick count will overflow
\r
2268 (rather than the time at which the next task will
\r
2269 unblock). Set to 1 otherwise xPendedTicks won't be
\r
2270 decremented below. */
\r
2271 xTicksToNextUnblockTime = ( TickType_t ) 1;
\r
2273 else if( xTicksToNextUnblockTime > ( TickType_t ) 1 )
\r
2275 /* Move the tick count one short of the next unblock
\r
2276 time, then call xTaskIncrementTick() to move the tick
\r
2277 count up to the next unblock time to unblock the task,
\r
2278 if any. This will also swap the blocked task and
\r
2279 overflow blocked task lists if necessary. */
\r
2280 xTickCount += ( xTicksToNextUnblockTime - ( TickType_t ) 1 );
\r
2282 xYieldPending |= xTaskIncrementTick();
\r
2284 /* Adjust for the number of ticks just added to
\r
2285 xTickCount and go around the loop again if
\r
2286 xTicksToCatchUp is still greater than 0. */
\r
2287 xPendedTicks -= xTicksToNextUnblockTime;
\r
2290 if( xYieldPending != pdFALSE )
\r
2292 #if( configUSE_PREEMPTION != 0 )
\r
2294 xAlreadyYielded = pdTRUE;
\r
2297 taskYIELD_IF_USING_PREEMPTION();
\r
2301 mtCOVERAGE_TEST_MARKER();
\r
2307 mtCOVERAGE_TEST_MARKER();
\r
2310 taskEXIT_CRITICAL();
\r
2312 return xAlreadyYielded;
\r
2314 /*-----------------------------------------------------------*/
\r
2316 TickType_t xTaskGetTickCount( void )
\r
2318 TickType_t xTicks;
\r
2320 /* Critical section required if running on a 16 bit processor. */
\r
2321 portTICK_TYPE_ENTER_CRITICAL();
\r
2323 xTicks = xTickCount;
\r
2325 portTICK_TYPE_EXIT_CRITICAL();
\r
2329 /*-----------------------------------------------------------*/
\r
2331 TickType_t xTaskGetTickCountFromISR( void )
\r
2333 TickType_t xReturn;
\r
2334 UBaseType_t uxSavedInterruptStatus;
\r
2336 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
2337 system call (or maximum API call) interrupt priority. Interrupts that are
\r
2338 above the maximum system call priority are kept permanently enabled, even
\r
2339 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
2340 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
2341 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
2342 failure if a FreeRTOS API function is called from an interrupt that has been
\r
2343 assigned a priority above the configured maximum system call priority.
\r
2344 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
2345 that have been assigned a priority at or (logically) below the maximum
\r
2346 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
2347 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
2348 More information (albeit Cortex-M specific) is provided on the following
\r
2349 link: https://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
2350 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
2352 uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
\r
2354 xReturn = xTickCount;
\r
2356 portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
2360 /*-----------------------------------------------------------*/
\r
2362 UBaseType_t uxTaskGetNumberOfTasks( void )
\r
2364 /* A critical section is not required because the variables are of type
\r
2366 return uxCurrentNumberOfTasks;
\r
2368 /*-----------------------------------------------------------*/
\r
2370 char *pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
2374 /* If null is passed in here then the name of the calling task is being
\r
2376 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
\r
2377 configASSERT( pxTCB );
\r
2378 return &( pxTCB->pcTaskName[ 0 ] );
\r
2380 /*-----------------------------------------------------------*/
\r
2382 #if ( INCLUDE_xTaskGetHandle == 1 )
\r
2384 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] )
\r
2386 TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL;
\r
2389 BaseType_t xBreakLoop;
\r
2391 /* This function is called with the scheduler suspended. */
\r
2393 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
\r
2395 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
2399 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
2401 /* Check each character in the name looking for a match or
\r
2403 xBreakLoop = pdFALSE;
\r
2404 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
\r
2406 cNextChar = pxNextTCB->pcTaskName[ x ];
\r
2408 if( cNextChar != pcNameToQuery[ x ] )
\r
2410 /* Characters didn't match. */
\r
2411 xBreakLoop = pdTRUE;
\r
2413 else if( cNextChar == ( char ) 0x00 )
\r
2415 /* Both strings terminated, a match must have been
\r
2417 pxReturn = pxNextTCB;
\r
2418 xBreakLoop = pdTRUE;
\r
2422 mtCOVERAGE_TEST_MARKER();
\r
2425 if( xBreakLoop != pdFALSE )
\r
2431 if( pxReturn != NULL )
\r
2433 /* The handle has been found. */
\r
2437 } while( pxNextTCB != pxFirstTCB );
\r
2441 mtCOVERAGE_TEST_MARKER();
\r
2447 #endif /* INCLUDE_xTaskGetHandle */
\r
2448 /*-----------------------------------------------------------*/
\r
2450 #if ( INCLUDE_xTaskGetHandle == 1 )
\r
2452 TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
2454 UBaseType_t uxQueue = configMAX_PRIORITIES;
\r
2457 /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
\r
2458 configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
\r
2460 vTaskSuspendAll();
\r
2462 /* Search the ready lists. */
\r
2466 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );
\r
2468 if( pxTCB != NULL )
\r
2470 /* Found the handle. */
\r
2474 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
2476 /* Search the delayed lists. */
\r
2477 if( pxTCB == NULL )
\r
2479 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
\r
2482 if( pxTCB == NULL )
\r
2484 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
\r
2487 #if ( INCLUDE_vTaskSuspend == 1 )
\r
2489 if( pxTCB == NULL )
\r
2491 /* Search the suspended list. */
\r
2492 pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
\r
2497 #if( INCLUDE_vTaskDelete == 1 )
\r
2499 if( pxTCB == NULL )
\r
2501 /* Search the deleted list. */
\r
2502 pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
\r
2507 ( void ) xTaskResumeAll();
\r
2512 #endif /* INCLUDE_xTaskGetHandle */
\r
2513 /*-----------------------------------------------------------*/
\r
2515 #if ( configUSE_TRACE_FACILITY == 1 )
\r
2517 UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime )
\r
2519 UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
\r
2521 vTaskSuspendAll();
\r
2523 /* Is there a space in the array for each task in the system? */
\r
2524 if( uxArraySize >= uxCurrentNumberOfTasks )
\r
2526 /* Fill in an TaskStatus_t structure with information on each
\r
2527 task in the Ready state. */
\r
2531 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
\r
2533 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
2535 /* Fill in an TaskStatus_t structure with information on each
\r
2536 task in the Blocked state. */
\r
2537 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
\r
2538 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );
\r
2540 #if( INCLUDE_vTaskDelete == 1 )
\r
2542 /* Fill in an TaskStatus_t structure with information on
\r
2543 each task that has been deleted but not yet cleaned up. */
\r
2544 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
\r
2548 #if ( INCLUDE_vTaskSuspend == 1 )
\r
2550 /* Fill in an TaskStatus_t structure with information on
\r
2551 each task in the Suspended state. */
\r
2552 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
\r
2556 #if ( configGENERATE_RUN_TIME_STATS == 1)
\r
2558 if( pulTotalRunTime != NULL )
\r
2560 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
\r
2561 portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
\r
2563 *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
\r
2569 if( pulTotalRunTime != NULL )
\r
2571 *pulTotalRunTime = 0;
\r
2578 mtCOVERAGE_TEST_MARKER();
\r
2581 ( void ) xTaskResumeAll();
\r
2586 #endif /* configUSE_TRACE_FACILITY */
\r
2587 /*----------------------------------------------------------*/
\r
2589 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
\r
2591 TaskHandle_t xTaskGetIdleTaskHandle( void )
\r
2593 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
\r
2594 started, then xIdleTaskHandle will be NULL. */
\r
2595 configASSERT( ( xIdleTaskHandle != NULL ) );
\r
2596 return xIdleTaskHandle;
\r
2599 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
\r
2600 /*----------------------------------------------------------*/
\r
/* This conditional compilation should use inequality to 0, not equality to 1.
This is to ensure vTaskStepTick() is available when user defined low power mode
implementations require configUSE_TICKLESS_IDLE to be set to a value other than
1. */
\r
2606 #if ( configUSE_TICKLESS_IDLE != 0 )
\r
2608 void vTaskStepTick( const TickType_t xTicksToJump )
\r
2610 /* Correct the tick count value after a period during which the tick
\r
2611 was suppressed. Note this does *not* call the tick hook function for
\r
2612 each stepped tick. */
\r
2613 configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
\r
2614 xTickCount += xTicksToJump;
\r
2615 traceINCREASE_TICK_COUNT( xTicksToJump );
\r
2618 #endif /* configUSE_TICKLESS_IDLE */
\r
2619 /*----------------------------------------------------------*/
\r
2621 BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
\r
2623 BaseType_t xYieldRequired = pdFALSE;
\r
2625 /* Must not be called with the scheduler suspended as the implementation
\r
2626 relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
\r
2627 configASSERT( uxSchedulerSuspended == 0 );
\r
2629 /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
\r
2630 the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
\r
2631 vTaskSuspendAll();
\r
2632 xPendedTicks += xTicksToCatchUp;
\r
2633 xYieldRequired = xTaskResumeAll();
\r
2635 return xYieldRequired;
\r
2637 /*----------------------------------------------------------*/
\r
2639 #if ( INCLUDE_xTaskAbortDelay == 1 )
\r
2641 BaseType_t xTaskAbortDelayFromISR( TaskHandle_t xTask, BaseType_t * const pxHigherPriorityTaskWoken )
\r
2643 TCB_t *pxTCB = xTask;
\r
2644 BaseType_t xReturn;
\r
2645 UBaseType_t uxSavedInterruptStatus;
\r
2647 configASSERT( pxTCB );
\r
2649 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
2650 system call (or maximum API call) interrupt priority. Interrupts that are
\r
2651 above the maximum system call priority are kept permanently enabled, even
\r
2652 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
2653 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
2654 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
2655 failure if a FreeRTOS API function is called from an interrupt that has been
\r
2656 assigned a priority above the configured maximum system call priority.
\r
2657 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
2658 that have been assigned a priority at or (logically) below the maximum
\r
2659 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
2660 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
2661 More information (albeit Cortex-M specific) is provided on the following
\r
2662 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
2663 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
2665 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
2667 /* A task can only be prematurely removed from the Blocked state if
\r
2668 it is actually in the Blocked state. */
\r
2669 if( eTaskGetState( xTask ) == eBlocked )
\r
2673 /* Remove the reference to the task from the blocked list. A higher
\r
2674 priority interrupt won't touch the xStateListItem because of the
\r
2675 critical section. */
\r
2676 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
2678 /* Is the task waiting on an event also? If so remove it from
\r
2679 the event list too. */
\r
2680 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
\r
2682 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
2684 /* This lets the task know it was forcibly removed from the
\r
2685 blocked state so it should not re-evaluate its block time and
\r
2686 then block again. */
\r
2687 pxTCB->ucDelayAborted = pdTRUE;
\r
2691 mtCOVERAGE_TEST_MARKER();
\r
2694 /* Place the unblocked task into the appropriate ready list. */
\r
2695 prvAddTaskToReadyList( pxTCB );
\r
2697 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
2699 if( pxHigherPriorityTaskWoken != NULL )
\r
2701 /* Pend the yield to be performed when the scheduler
\r
2702 is unsuspended. */
\r
2703 *pxHigherPriorityTaskWoken = pdTRUE;
\r
2708 mtCOVERAGE_TEST_MARKER();
\r
2716 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
2722 /*----------------------------------------------------------*/
\r
2724 #if ( INCLUDE_xTaskAbortDelay == 1 )
\r
2726 BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
\r
2728 TCB_t *pxTCB = xTask;
\r
2729 BaseType_t xReturn;
\r
2731 configASSERT( pxTCB );
\r
2733 vTaskSuspendAll();
\r
2735 /* A task can only be prematurely removed from the Blocked state if
\r
2736 it is actually in the Blocked state. */
\r
2737 if( eTaskGetState( xTask ) == eBlocked )
\r
2741 /* Remove the reference to the task from the blocked list. An
\r
2742 interrupt won't touch the xStateListItem because the
\r
2743 scheduler is suspended. */
\r
2744 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
2746 /* Is the task waiting on an event also? If so remove it from
\r
2747 the event list too. Interrupts can touch the event list item,
\r
2748 even though the scheduler is suspended, so a critical section
\r
2750 taskENTER_CRITICAL();
\r
2752 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
\r
2754 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
2756 /* This lets the task know it was forcibly removed from the
\r
2757 blocked state so it should not re-evaluate its block time and
\r
2758 then block again. */
\r
2759 pxTCB->ucDelayAborted = pdTRUE;
\r
2763 mtCOVERAGE_TEST_MARKER();
\r
2766 taskEXIT_CRITICAL();
\r
2768 /* Place the unblocked task into the appropriate ready list. */
\r
2769 prvAddTaskToReadyList( pxTCB );
\r
2771 /* A task being unblocked cannot cause an immediate context
\r
2772 switch if preemption is turned off. */
\r
2773 #if ( configUSE_PREEMPTION == 1 )
\r
2775 /* Preemption is on, but a context switch should only be
\r
2776 performed if the unblocked task has a priority that is
\r
2777 equal to or higher than the currently executing task. */
\r
2778 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
2780 /* Pend the yield to be performed when the scheduler
\r
2781 is unsuspended. */
\r
2782 xYieldPending = pdTRUE;
\r
2786 mtCOVERAGE_TEST_MARKER();
\r
2789 #endif /* configUSE_PREEMPTION */
\r
2796 ( void ) xTaskResumeAll();
\r
2801 #endif /* INCLUDE_xTaskAbortDelay */
\r
2802 /*----------------------------------------------------------*/
\r
2804 BaseType_t xTaskIncrementTick( void )
\r
2807 TickType_t xItemValue;
\r
2808 BaseType_t xSwitchRequired = pdFALSE;
\r
2810 /* Called by the portable layer each time a tick interrupt occurs.
\r
2811 Increments the tick then checks to see if the new tick value will cause any
\r
2812 tasks to be unblocked. */
\r
2813 traceTASK_INCREMENT_TICK( xTickCount );
\r
2814 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
2816 /* Minor optimisation. The tick count cannot change in this
\r
2818 const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
\r
2820 /* Increment the RTOS tick, switching the delayed and overflowed
\r
2821 delayed lists if it wraps to 0. */
\r
2822 xTickCount = xConstTickCount;
\r
2824 if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */
\r
2826 taskSWITCH_DELAYED_LISTS();
\r
2830 mtCOVERAGE_TEST_MARKER();
\r
2833 /* See if this tick has made a timeout expire. Tasks are stored in
\r
2834 the queue in the order of their wake time - meaning once one task
\r
2835 has been found whose block time has not expired there is no need to
\r
2836 look any further down the list. */
\r
2837 if( xConstTickCount >= xNextTaskUnblockTime )
\r
2841 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
\r
2843 /* The delayed list is empty. Set xNextTaskUnblockTime
\r
2844 to the maximum possible value so it is extremely
\r
2846 if( xTickCount >= xNextTaskUnblockTime ) test will pass
\r
2847 next time through. */
\r
2848 xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
2853 /* The delayed list is not empty, get the value of the
\r
2854 item at the head of the delayed list. This is the time
\r
2855 at which the task at the head of the delayed list must
\r
2856 be removed from the Blocked state. */
\r
2857 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
2858 xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
\r
2860 if( xConstTickCount < xItemValue )
\r
2862 /* It is not time to unblock this item yet, but the
\r
2863 item value is the time at which the task at the head
\r
2864 of the blocked list must be removed from the Blocked
\r
2865 state - so record the item value in
\r
2866 xNextTaskUnblockTime. */
\r
2867 xNextTaskUnblockTime = xItemValue;
\r
2868 break; /*lint !e9011 Code structure here is deedmed easier to understand with multiple breaks. */
\r
2872 mtCOVERAGE_TEST_MARKER();
\r
2875 /* It is time to remove the item from the Blocked state. */
\r
2876 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
2878 /* Is the task waiting on an event also? If so remove
\r
2879 it from the event list. */
\r
2880 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
\r
2882 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
2886 mtCOVERAGE_TEST_MARKER();
\r
2889 /* Place the unblocked task into the appropriate ready
\r
2891 prvAddTaskToReadyList( pxTCB );
\r
2893 /* A task being unblocked cannot cause an immediate
\r
2894 context switch if preemption is turned off. */
\r
2895 #if ( configUSE_PREEMPTION == 1 )
\r
2897 /* Preemption is on, but a context switch should
\r
2898 only be performed if the unblocked task has a
\r
2899 priority that is equal to or higher than the
\r
2900 currently executing task. */
\r
2901 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
\r
2903 xSwitchRequired = pdTRUE;
\r
2907 mtCOVERAGE_TEST_MARKER();
\r
2910 #endif /* configUSE_PREEMPTION */
\r
2915 /* Tasks of equal priority to the currently running task will share
\r
2916 processing time (time slice) if preemption is on, and the application
\r
2917 writer has not explicitly turned time slicing off. */
\r
2918 #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
\r
2920 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 )
\r
2922 xSwitchRequired = pdTRUE;
\r
2926 mtCOVERAGE_TEST_MARKER();
\r
2929 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
\r
2931 #if ( configUSE_TICK_HOOK == 1 )
\r
2933 /* Guard against the tick hook being called when the pended tick
\r
2934 count is being unwound (when the scheduler is being unlocked). */
\r
2935 if( xPendedTicks == ( TickType_t ) 0 )
\r
2937 vApplicationTickHook();
\r
2941 mtCOVERAGE_TEST_MARKER();
\r
2944 #endif /* configUSE_TICK_HOOK */
\r
2946 #if ( configUSE_PREEMPTION == 1 )
\r
2948 if( xYieldPending != pdFALSE )
\r
2950 xSwitchRequired = pdTRUE;
\r
2954 mtCOVERAGE_TEST_MARKER();
\r
2957 #endif /* configUSE_PREEMPTION */
\r
2963 /* The tick hook gets called at regular intervals, even if the
\r
2964 scheduler is locked. */
\r
2965 #if ( configUSE_TICK_HOOK == 1 )
\r
2967 vApplicationTickHook();
\r
2972 return xSwitchRequired;
\r
2974 /*-----------------------------------------------------------*/
\r
#if ( configUSE_APPLICATION_TASK_TAG == 1 )

	void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
	{
	TCB_t *xTCB;

		/* If xTask is NULL then it is the task hook of the calling task that is
		getting set. */
		if( xTask == NULL )
		{
			xTCB = ( TCB_t * ) pxCurrentTCB;
		}
		else
		{
			xTCB = xTask;
		}

		/* Save the hook function in the TCB.  A critical section is required as
		the value can be accessed from an interrupt. */
		taskENTER_CRITICAL();
		{
			xTCB->pxTaskTag = pxHookFunction;
		}
		taskEXIT_CRITICAL();
	}

#endif /* configUSE_APPLICATION_TASK_TAG */
\r
3003 /*-----------------------------------------------------------*/
\r
#if ( configUSE_APPLICATION_TASK_TAG == 1 )

	TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
	{
	TCB_t *pxTCB;
	TaskHookFunction_t xReturn;

		/* If xTask is NULL then set the calling task's hook. */
		pxTCB = prvGetTCBFromHandle( xTask );

		/* Save the hook function in the TCB.  A critical section is required as
		the value can be accessed from an interrupt. */
		taskENTER_CRITICAL();
		{
			xReturn = pxTCB->pxTaskTag;
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_APPLICATION_TASK_TAG */
\r
3027 /*-----------------------------------------------------------*/
\r
#if ( configUSE_APPLICATION_TASK_TAG == 1 )

	TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
	{
	TCB_t *pxTCB;
	TaskHookFunction_t xReturn;
	UBaseType_t uxSavedInterruptStatus;

		/* If xTask is NULL then set the calling task's hook. */
		pxTCB = prvGetTCBFromHandle( xTask );

		/* Read the hook function from the TCB.  An ISR safe critical section is
		required as the value can be accessed from an interrupt. */
		uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
		{
			xReturn = pxTCB->pxTaskTag;
		}
		portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

		return xReturn;
	}

#endif /* configUSE_APPLICATION_TASK_TAG */
\r
3052 /*-----------------------------------------------------------*/
\r
#if ( configUSE_APPLICATION_TASK_TAG == 1 )

	BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
	{
	TCB_t *xTCB;
	BaseType_t xReturn;

		/* If xTask is NULL then we are calling our own task hook. */
		if( xTask == NULL )
		{
			xTCB = pxCurrentTCB;
		}
		else
		{
			xTCB = xTask;
		}

		if( xTCB->pxTaskTag != NULL )
		{
			xReturn = xTCB->pxTaskTag( pvParameter );
		}
		else
		{
			/* No hook installed - report failure rather than calling NULL. */
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_APPLICATION_TASK_TAG */
\r
3084 /*-----------------------------------------------------------*/
\r
3086 void vTaskSwitchContext( void )
\r
3088 if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE )
\r
3090 /* The scheduler is currently suspended - do not allow a context
\r
3092 xYieldPending = pdTRUE;
\r
3096 xYieldPending = pdFALSE;
\r
3097 traceTASK_SWITCHED_OUT();
\r
3099 #if ( configGENERATE_RUN_TIME_STATS == 1 )
\r
3101 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
\r
3102 portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
\r
3104 ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
\r
3107 /* Add the amount of time the task has been running to the
\r
3108 accumulated time so far. The time the task started running was
\r
3109 stored in ulTaskSwitchedInTime. Note that there is no overflow
\r
3110 protection here so count values are only valid until the timer
\r
3111 overflows. The guard against negative values is to protect
\r
3112 against suspect run time stat counter implementations - which
\r
3113 are provided by the application, not the kernel. */
\r
3114 if( ulTotalRunTime > ulTaskSwitchedInTime )
\r
3116 pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime );
\r
3120 mtCOVERAGE_TEST_MARKER();
\r
3122 ulTaskSwitchedInTime = ulTotalRunTime;
\r
3124 #endif /* configGENERATE_RUN_TIME_STATS */
\r
3126 /* Check for stack overflow, if configured. */
\r
3127 taskCHECK_FOR_STACK_OVERFLOW();
\r
3129 /* Before the currently running task is switched out, save its errno. */
\r
3130 #if( configUSE_POSIX_ERRNO == 1 )
\r
3132 pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
\r
3136 /* Select a new task to run using either the generic C or port
\r
3137 optimised asm code. */
\r
3138 taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
3139 traceTASK_SWITCHED_IN();
\r
3141 /* After the new task is switched in, update the global errno. */
\r
3142 #if( configUSE_POSIX_ERRNO == 1 )
\r
3144 FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
\r
3148 #if ( configUSE_NEWLIB_REENTRANT == 1 )
\r
3150 /* Switch Newlib's _impure_ptr variable to point to the _reent
\r
3151 structure specific to this task.
\r
3152 See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
\r
3153 for additional information. */
\r
3154 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
\r
3156 #endif /* configUSE_NEWLIB_REENTRANT */
\r
3159 /*-----------------------------------------------------------*/
\r
3161 void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
\r
3163 configASSERT( pxEventList );
\r
3165 /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
\r
3166 SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
\r
3168 /* Place the event list item of the TCB in the appropriate event list.
\r
3169 This is placed in the list in priority order so the highest priority task
\r
3170 is the first to be woken by the event. The queue that contains the event
\r
3171 list is locked, preventing simultaneous access from interrupts. */
\r
3172 vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
\r
3174 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
\r
3176 /*-----------------------------------------------------------*/
\r
3178 void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
\r
3180 configASSERT( pxEventList );
\r
3182 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
\r
3183 the event groups implementation. */
\r
3184 configASSERT( uxSchedulerSuspended != 0 );
\r
3186 /* Store the item value in the event list item. It is safe to access the
\r
3187 event list item here as interrupts won't access the event list item of a
\r
3188 task that is not in the Blocked state. */
\r
3189 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
\r
3191 /* Place the event list item of the TCB at the end of the appropriate event
\r
3192 list. It is safe to access the event list here because it is part of an
\r
3193 event group implementation - and interrupts don't access event groups
\r
3194 directly (instead they access them indirectly by pending function calls to
\r
3195 the task level). */
\r
3196 vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
\r
3198 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
\r
3200 /*-----------------------------------------------------------*/
\r
#if( configUSE_TIMERS == 1 )

	void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
	{
		configASSERT( pxEventList );

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements -
		it should be called with the scheduler suspended. */

		/* Place the event list item of the TCB in the appropriate event list.
		In this case it is assume that this is the only task that is going to
		be waiting on this event list, so the faster vListInsertEnd() function
		can be used in place of vListInsert. */
		vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );

		/* If the task should block indefinitely then set the block time to a
		value that will be recognised as an indefinite delay inside the
		prvAddCurrentTaskToDelayedList() function. */
		if( xWaitIndefinitely != pdFALSE )
		{
			xTicksToWait = portMAX_DELAY;
		}

		traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
		prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
	}

#endif /* configUSE_TIMERS */
\r
3233 /*-----------------------------------------------------------*/
\r
3235 BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
\r
3237 TCB_t *pxUnblockedTCB;
\r
3238 BaseType_t xReturn;
\r
3240 /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
\r
3241 called from a critical section within an ISR. */
\r
3243 /* The event list is sorted in priority order, so the first in the list can
\r
3244 be removed as it is known to be the highest priority. Remove the TCB from
\r
3245 the delayed list, and add it to the ready list.
\r
3247 If an event is for a queue that is locked then this function will never
\r
3248 get called - the lock count on the queue will get modified instead. This
\r
3249 means exclusive access to the event list is guaranteed here.
\r
3251 This function assumes that a check has already been made to ensure that
\r
3252 pxEventList is not empty. */
\r
3253 pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
3254 configASSERT( pxUnblockedTCB );
\r
3255 ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
\r
3257 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
3259 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
\r
3260 prvAddTaskToReadyList( pxUnblockedTCB );
\r
3262 #if( configUSE_TICKLESS_IDLE != 0 )
\r
3264 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
\r
3265 might be set to the blocked task's time out time. If the task is
\r
3266 unblocked for a reason other than a timeout xNextTaskUnblockTime is
\r
3267 normally left unchanged, because it is automatically reset to a new
\r
3268 value when the tick count equals xNextTaskUnblockTime. However if
\r
3269 tickless idling is used it might be more important to enter sleep mode
\r
3270 at the earliest possible time - so reset xNextTaskUnblockTime here to
\r
3271 ensure it is updated at the earliest possible time. */
\r
3272 prvResetNextTaskUnblockTime();
\r
3278 /* The delayed and ready lists cannot be accessed, so hold this task
\r
3279 pending until the scheduler is resumed. */
\r
3280 vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
\r
3283 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
3285 /* Return true if the task removed from the event list has a higher
\r
3286 priority than the calling task. This allows the calling task to know if
\r
3287 it should force a context switch now. */
\r
3290 /* Mark that a yield is pending in case the user is not using the
\r
3291 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
\r
3292 xYieldPending = pdTRUE;
\r
3296 xReturn = pdFALSE;
\r
3301 /*-----------------------------------------------------------*/
\r
3303 void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
\r
3305 TCB_t *pxUnblockedTCB;
\r
3307 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
\r
3308 the event flags implementation. */
\r
3309 configASSERT( uxSchedulerSuspended != pdFALSE );
\r
3311 /* Store the new item value in the event list. */
\r
3312 listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
\r
3314 /* Remove the event list form the event flag. Interrupts do not access
\r
3316 pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
3317 configASSERT( pxUnblockedTCB );
\r
3318 ( void ) uxListRemove( pxEventListItem );
\r
3320 #if( configUSE_TICKLESS_IDLE != 0 )
\r
3322 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
\r
3323 might be set to the blocked task's time out time. If the task is
\r
3324 unblocked for a reason other than a timeout xNextTaskUnblockTime is
\r
3325 normally left unchanged, because it is automatically reset to a new
\r
3326 value when the tick count equals xNextTaskUnblockTime. However if
\r
3327 tickless idling is used it might be more important to enter sleep mode
\r
3328 at the earliest possible time - so reset xNextTaskUnblockTime here to
\r
3329 ensure it is updated at the earliest possible time. */
\r
3330 prvResetNextTaskUnblockTime();
\r
3334 /* Remove the task from the delayed list and add it to the ready list. The
\r
3335 scheduler is suspended so interrupts will not be accessing the ready
\r
3337 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
\r
3338 prvAddTaskToReadyList( pxUnblockedTCB );
\r
3340 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
3342 /* The unblocked task has a priority above that of the calling task, so
\r
3343 a context switch is required. This function is called with the
\r
3344 scheduler suspended so xYieldPending is set so the context switch
\r
3345 occurs immediately that the scheduler is resumed (unsuspended). */
\r
3346 xYieldPending = pdTRUE;
\r
3349 /*-----------------------------------------------------------*/
\r
3351 void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
\r
3353 configASSERT( pxTimeOut );
\r
3354 taskENTER_CRITICAL();
\r
3356 pxTimeOut->xOverflowCount = xNumOfOverflows;
\r
3357 pxTimeOut->xTimeOnEntering = xTickCount;
\r
3359 taskEXIT_CRITICAL();
\r
3361 /*-----------------------------------------------------------*/
\r
3363 void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
\r
3365 /* For internal use only as it does not use a critical section. */
\r
3366 pxTimeOut->xOverflowCount = xNumOfOverflows;
\r
3367 pxTimeOut->xTimeOnEntering = xTickCount;
\r
3369 /*-----------------------------------------------------------*/
\r
3371 BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
\r
3373 BaseType_t xReturn;
\r
3375 configASSERT( pxTimeOut );
\r
3376 configASSERT( pxTicksToWait );
\r
3378 taskENTER_CRITICAL();
\r
3380 /* Minor optimisation. The tick count cannot change in this block. */
\r
3381 const TickType_t xConstTickCount = xTickCount;
\r
3382 const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;
\r
3384 #if( INCLUDE_xTaskAbortDelay == 1 )
\r
3385 if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
\r
3387 /* The delay was aborted, which is not the same as a time out,
\r
3388 but has the same result. */
\r
3389 pxCurrentTCB->ucDelayAborted = pdFALSE;
\r
3395 #if ( INCLUDE_vTaskSuspend == 1 )
\r
3396 if( *pxTicksToWait == portMAX_DELAY )
\r
3398 /* If INCLUDE_vTaskSuspend is set to 1 and the block time
\r
3399 specified is the maximum block time then the task should block
\r
3400 indefinitely, and therefore never time out. */
\r
3401 xReturn = pdFALSE;
\r
3406 if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
\r
3408 /* The tick count is greater than the time at which
\r
3409 vTaskSetTimeout() was called, but has also overflowed since
\r
3410 vTaskSetTimeOut() was called. It must have wrapped all the way
\r
3411 around and gone past again. This passed since vTaskSetTimeout()
\r
3415 else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
\r
3417 /* Not a genuine timeout. Adjust parameters for time remaining. */
\r
3418 *pxTicksToWait -= xElapsedTime;
\r
3419 vTaskInternalSetTimeOutState( pxTimeOut );
\r
3420 xReturn = pdFALSE;
\r
3424 *pxTicksToWait = 0;
\r
3428 taskEXIT_CRITICAL();
\r
3432 /*-----------------------------------------------------------*/
\r
3434 void vTaskMissedYield( void )
\r
3436 xYieldPending = pdTRUE;
\r
3438 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TRACE_FACILITY == 1 )

	UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
	{
	UBaseType_t uxReturn;
	TCB_t const *pxTCB;

		if( xTask != NULL )
		{
			pxTCB = xTask;
			uxReturn = pxTCB->uxTaskNumber;
		}
		else
		{
			/* A NULL handle has no trace number. */
			uxReturn = 0U;
		}

		return uxReturn;
	}

#endif /* configUSE_TRACE_FACILITY */
\r
3461 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TRACE_FACILITY == 1 )

	void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle )
	{
	TCB_t *pxTCB;

		if( xTask != NULL )
		{
			pxTCB = xTask;
			pxTCB->uxTaskNumber = uxHandle;
		}
	}

#endif /* configUSE_TRACE_FACILITY */
\r
3479 * -----------------------------------------------------------
\r
3481 * ----------------------------------------------------------
\r
3483 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
\r
3484 * language extensions. The equivalent prototype for this function is:
\r
3486 * void prvIdleTask( void *pvParameters );
\r
3489 static portTASK_FUNCTION( prvIdleTask, pvParameters )
\r
3491 /* Stop warnings. */
\r
3492 ( void ) pvParameters;
\r
3494 /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
\r
3495 SCHEDULER IS STARTED. **/
\r
3497 /* In case a task that has a secure context deletes itself, in which case
\r
3498 the idle task is responsible for deleting the task's secure context, if
\r
3500 portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );
\r
3504 /* See if any tasks have deleted themselves - if so then the idle task
\r
3505 is responsible for freeing the deleted task's TCB and stack. */
\r
3506 prvCheckTasksWaitingTermination();
\r
3508 #if ( configUSE_PREEMPTION == 0 )
\r
3510 /* If we are not using preemption we keep forcing a task switch to
\r
3511 see if any other task has become available. If we are using
\r
3512 preemption we don't need to do this as any task becoming available
\r
3513 will automatically get the processor anyway. */
\r
3516 #endif /* configUSE_PREEMPTION */
\r
3518 #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
\r
3520 /* When using preemption tasks of equal priority will be
\r
3521 timesliced. If a task that is sharing the idle priority is ready
\r
3522 to run then the idle task should yield before the end of the
\r
3525 A critical region is not required here as we are just reading from
\r
3526 the list, and an occasional incorrect value will not matter. If
\r
3527 the ready list at the idle priority contains more than one task
\r
3528 then a task other than the idle task is ready to execute. */
\r
3529 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
\r
3535 mtCOVERAGE_TEST_MARKER();
\r
3538 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
\r
3540 #if ( configUSE_IDLE_HOOK == 1 )
\r
3542 extern void vApplicationIdleHook( void );
\r
3544 /* Call the user defined function from within the idle task. This
\r
3545 allows the application designer to add background functionality
\r
3546 without the overhead of a separate task.
\r
3547 NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
\r
3548 CALL A FUNCTION THAT MIGHT BLOCK. */
\r
3549 vApplicationIdleHook();
\r
3551 #endif /* configUSE_IDLE_HOOK */
\r
3553 /* This conditional compilation should use inequality to 0, not equality
\r
3554 to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
\r
3555 user defined low power mode implementations require
\r
3556 configUSE_TICKLESS_IDLE to be set to a value other than 1. */
\r
3557 #if ( configUSE_TICKLESS_IDLE != 0 )
\r
3559 TickType_t xExpectedIdleTime;
\r
3561 /* It is not desirable to suspend then resume the scheduler on
\r
3562 each iteration of the idle task. Therefore, a preliminary
\r
3563 test of the expected idle time is performed without the
\r
3564 scheduler suspended. The result here is not necessarily
\r
3566 xExpectedIdleTime = prvGetExpectedIdleTime();
\r
3568 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
\r
3570 vTaskSuspendAll();
\r
3572 /* Now the scheduler is suspended, the expected idle
\r
3573 time can be sampled again, and this time its value can
\r
3575 configASSERT( xNextTaskUnblockTime >= xTickCount );
\r
3576 xExpectedIdleTime = prvGetExpectedIdleTime();
\r
3578 /* Define the following macro to set xExpectedIdleTime to 0
\r
3579 if the application does not want
\r
3580 portSUPPRESS_TICKS_AND_SLEEP() to be called. */
\r
3581 configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );
\r
3583 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
\r
3585 traceLOW_POWER_IDLE_BEGIN();
\r
3586 portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
\r
3587 traceLOW_POWER_IDLE_END();
\r
3591 mtCOVERAGE_TEST_MARKER();
\r
3594 ( void ) xTaskResumeAll();
\r
3598 mtCOVERAGE_TEST_MARKER();
\r
3601 #endif /* configUSE_TICKLESS_IDLE */
\r
3604 /*-----------------------------------------------------------*/
\r
#if( configUSE_TICKLESS_IDLE != 0 )

	eSleepModeStatus eTaskConfirmSleepModeStatus( void )
	{
	/* The idle task exists in addition to the application tasks. */
	const UBaseType_t uxNonApplicationTasks = 1;
	eSleepModeStatus eReturn = eStandardSleep;

		/* This function must be called from a critical section. */

		if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
		{
			/* A task was made ready while the scheduler was suspended. */
			eReturn = eAbortSleep;
		}
		else if( xYieldPending != pdFALSE )
		{
			/* A yield was pended while the scheduler was suspended. */
			eReturn = eAbortSleep;
		}
		else
		{
			/* If all the tasks are in the suspended list (which might mean they
			have an infinite block time rather than actually being suspended)
			then it is safe to turn all clocks off and just wait for external
			interrupts. */
			if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
			{
				eReturn = eNoTasksWaitingTimeout;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

		return eReturn;
	}

#endif /* configUSE_TICKLESS_IDLE */
\r
3646 /*-----------------------------------------------------------*/
\r
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )

	void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
	{
	TCB_t *pxTCB;

		/* Out of range indexes are silently ignored. */
		if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
		{
			pxTCB = prvGetTCBFromHandle( xTaskToSet );
			pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
		}
	}

#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
\r
3662 /*-----------------------------------------------------------*/
\r
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )

	void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex )
	{
	void *pvReturn = NULL;
	TCB_t *pxTCB;

		if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
		{
			pxTCB = prvGetTCBFromHandle( xTaskToQuery );
			pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
		}
		else
		{
			/* Out of range indexes return NULL. */
			pvReturn = NULL;
		}

		return pvReturn;
	}

#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
\r
3685 /*-----------------------------------------------------------*/
\r
#if ( portUSING_MPU_WRAPPERS == 1 )

	void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions )
	{
	TCB_t *pxTCB;

		/* If null is passed in here then we are modifying the MPU settings of
		the calling task. */
		pxTCB = prvGetTCBFromHandle( xTaskToModify );

		vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
	}

#endif /* portUSING_MPU_WRAPPERS */
\r
3701 /*-----------------------------------------------------------*/
\r
3703 static void prvInitialiseTaskLists( void )
\r
3705 UBaseType_t uxPriority;
\r
3707 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
\r
3709 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
\r
3712 vListInitialise( &xDelayedTaskList1 );
\r
3713 vListInitialise( &xDelayedTaskList2 );
\r
3714 vListInitialise( &xPendingReadyList );
\r
3716 #if ( INCLUDE_vTaskDelete == 1 )
\r
3718 vListInitialise( &xTasksWaitingTermination );
\r
3720 #endif /* INCLUDE_vTaskDelete */
\r
3722 #if ( INCLUDE_vTaskSuspend == 1 )
\r
3724 vListInitialise( &xSuspendedTaskList );
\r
3726 #endif /* INCLUDE_vTaskSuspend */
\r
3728 /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
\r
3730 pxDelayedTaskList = &xDelayedTaskList1;
\r
3731 pxOverflowDelayedTaskList = &xDelayedTaskList2;
\r
3733 /*-----------------------------------------------------------*/
\r
static void prvCheckTasksWaitingTermination( void )
{

	/** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/

	#if ( INCLUDE_vTaskDelete == 1 )
	{
		TCB_t *pxTCB;

		/* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
		being called too often in the idle task. */
		while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
		{
			taskENTER_CRITICAL();
			{
				pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
				( void ) uxListRemove( &( pxTCB->xStateListItem ) );
				--uxCurrentNumberOfTasks;
				--uxDeletedTasksWaitingCleanUp;
			}
			taskEXIT_CRITICAL();

			prvDeleteTCB( pxTCB );
		}
	}
	#endif /* INCLUDE_vTaskDelete */
}
\r
3762 /*-----------------------------------------------------------*/
\r
#if( configUSE_TRACE_FACILITY == 1 )

	void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState )
	{
	TCB_t *pxTCB;

		/* xTask is NULL then get the state of the calling task. */
		pxTCB = prvGetTCBFromHandle( xTask );

		pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
		pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName [ 0 ] );
		pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
		pxTaskStatus->pxStackBase = pxTCB->pxStack;
		pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;

		#if ( configUSE_MUTEXES == 1 )
		{
			pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
		}
		#else
		{
			pxTaskStatus->uxBasePriority = 0;
		}
		#endif

		#if ( configGENERATE_RUN_TIME_STATS == 1 )
		{
			pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
		}
		#else
		{
			pxTaskStatus->ulRunTimeCounter = 0;
		}
		#endif

		/* Obtaining the task state is a little fiddly, so is only done if the
		value of eState passed into this function is eInvalid - otherwise the
		state is just set to whatever is passed in. */
		if( eState != eInvalid )
		{
			if( pxTCB == pxCurrentTCB )
			{
				pxTaskStatus->eCurrentState = eRunning;
			}
			else
			{
				pxTaskStatus->eCurrentState = eState;

				#if ( INCLUDE_vTaskSuspend == 1 )
				{
					/* If the task is in the suspended list then there is a
					chance it is actually just blocked indefinitely - so really
					it should be reported as being in the Blocked state. */
					if( eState == eSuspended )
					{
						vTaskSuspendAll();
						{
							if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
							{
								pxTaskStatus->eCurrentState = eBlocked;
							}
						}
						( void ) xTaskResumeAll();
					}
				}
				#endif /* INCLUDE_vTaskSuspend */
			}
		}
		else
		{
			pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
		}

		/* Obtaining the stack space takes some time, so the xGetFreeStackSpace
		parameter is provided to allow it to be skipped. */
		if( xGetFreeStackSpace != pdFALSE )
		{
			#if ( portSTACK_GROWTH > 0 )
			{
				pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
			}
			#else
			{
				pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
			}
			#endif
		}
		else
		{
			pxTaskStatus->usStackHighWaterMark = 0;
		}
	}

#endif /* configUSE_TRACE_FACILITY */
\r
3858 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TRACE_FACILITY == 1 )

    /* Fill pxTaskStatusArray with a TaskStatus_t entry for every task on
    pxList, reporting each task's state as eState.  Returns the number of
    entries written.  Helper for uxTaskGetSystemState(). */
    static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState )
    {
    configLIST_VOLATILE TCB_t *pxNextTCB, *pxFirstTCB;
    UBaseType_t uxTask = 0;

        if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
        {
            /* Remember the first task seen so the walk can stop after one
            full lap of the (circular) list. */
            listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */

            /* Populate a TaskStatus_t structure within the pxTaskStatusArray
            array for each task that is referenced from pxList.  See the
            definition of TaskStatus_t in task.h for the meaning of each
            TaskStatus_t structure member. */
            do
            {
                listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
                vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
                uxTask++;
            } while( pxNextTCB != pxFirstTCB );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return uxTask;
    }

#endif /* configUSE_TRACE_FACILITY */
\r
3891 /*-----------------------------------------------------------*/
\r
#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )

    /* Count how many stack bytes, starting from the far end of the stack,
    still hold the fill byte written at task creation - i.e. the stack "high
    water mark" expressed in StackType_t words.  pucStackByte must point at
    the end of the stack furthest from current use. */
    static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
    {
    uint32_t ulCount = 0U;

        /* Walk towards the in-use portion of the stack while the fill byte
        is still intact.  portSTACK_GROWTH is +/-1 scaled, so subtracting it
        always moves away from the unused end. */
        while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
        {
            pucStackByte -= portSTACK_GROWTH;
            ulCount++;
        }

        /* Convert the byte count into words. */
        ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */

        return ( configSTACK_DEPTH_TYPE ) ulCount;
    }

#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
\r
3911 /*-----------------------------------------------------------*/
\r
#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )

    /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are
    the same except for their return type.  Using configSTACK_DEPTH_TYPE
    allows the user to determine the return type.  It gets around the problem
    of the value overflowing on 8-bit types without breaking backward
    compatibility for applications that expect an 8-bit return type. */
    configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
    {
    TCB_t *pxTCB;
    uint8_t *pucEndOfStack;
    configSTACK_DEPTH_TYPE uxReturn;

        /* A NULL xTask means "the calling task". */
        pxTCB = prvGetTCBFromHandle( xTask );

        /* The unused end of the stack depends on the direction in which the
        stack grows on this port. */
        #if portSTACK_GROWTH < 0
        {
            pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
        }
        #else
        {
            pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
        }
        #endif

        uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );

        return uxReturn;
    }

#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
\r
3951 /*-----------------------------------------------------------*/
\r
#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )

    /* Return the minimum amount of stack (in words) that has ever been free
    for xTask (NULL means the calling task).  See also
    uxTaskGetStackHighWaterMark2(), which differs only in return type. */
    UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
    {
    TCB_t *pxTCB;
    uint8_t *pucEndOfStack;
    UBaseType_t uxReturn;

        pxTCB = prvGetTCBFromHandle( xTask );

        /* Pick the unused end of the stack according to the port's stack
        growth direction. */
        #if portSTACK_GROWTH < 0
        {
            pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
        }
        #else
        {
            pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
        }
        #endif

        uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );

        return uxReturn;
    }

#endif /* INCLUDE_uxTaskGetStackHighWaterMark */
\r
3979 /*-----------------------------------------------------------*/
\r
#if ( INCLUDE_vTaskDelete == 1 )

    /* Release all kernel-owned memory associated with pxTCB.  How much is
    freed depends on whether the task was created statically, dynamically,
    or a mixture of the two. */
    static void prvDeleteTCB( TCB_t *pxTCB )
    {
        /* This call is required specifically for the TriCore port.  It must be
        above the vPortFree() calls.  The call is also used by ports/demos that
        want to allocate and clean RAM statically. */
        portCLEAN_UP_TCB( pxTCB );

        /* Free up the memory allocated by the scheduler for the task.  It is up
        to the task to free any memory allocated at the application level.
        See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
        for additional information. */
        #if ( configUSE_NEWLIB_REENTRANT == 1 )
        {
            _reclaim_reent( &( pxTCB->xNewLib_reent ) );
        }
        #endif /* configUSE_NEWLIB_REENTRANT */

        #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
        {
            /* The task can only have been allocated dynamically - free both
            the stack and TCB. */
            vPortFree( pxTCB->pxStack );
            vPortFree( pxTCB );
        }
        #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
        {
            /* The task could have been allocated statically or dynamically, so
            check what was statically allocated before trying to free the
            memory. */
            if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
            {
                /* Both the stack and TCB were allocated dynamically, so both
                must be freed. */
                vPortFree( pxTCB->pxStack );
                vPortFree( pxTCB );
            }
            else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
            {
                /* Only the stack was statically allocated, so the TCB is the
                only memory that must be freed. */
                vPortFree( pxTCB );
            }
            else
            {
                /* Neither the stack nor the TCB were allocated dynamically, so
                nothing needs to be freed. */
                configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
    }

#endif /* INCLUDE_vTaskDelete */
\r
4037 /*-----------------------------------------------------------*/
\r
4039 static void prvResetNextTaskUnblockTime( void )
\r
4043 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
\r
4045 /* The new current delayed list is empty. Set xNextTaskUnblockTime to
\r
4046 the maximum possible value so it is extremely unlikely that the
\r
4047 if( xTickCount >= xNextTaskUnblockTime ) test will pass until
\r
4048 there is an item in the delayed list. */
\r
4049 xNextTaskUnblockTime = portMAX_DELAY;
\r
4053 /* The new current delayed list is not empty, get the value of
\r
4054 the item at the head of the delayed list. This is the time at
\r
4055 which the task at the head of the delayed list should be removed
\r
4056 from the Blocked state. */
\r
4057 ( pxTCB ) = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
4058 xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xStateListItem ) );
\r
4061 /*-----------------------------------------------------------*/
\r
#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )

    /* Return the handle of the currently running task. */
    TaskHandle_t xTaskGetCurrentTaskHandle( void )
    {
    TaskHandle_t xReturn;

        /* A critical section is not required as this is not called from
        an interrupt and the current TCB will always be the same for any
        individual execution thread. */
        xReturn = pxCurrentTCB;

        return xReturn;
    }

#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
\r
4078 /*-----------------------------------------------------------*/
\r
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )

    /* Report whether the scheduler is not yet started, running, or
    suspended (by vTaskSuspendAll()). */
    BaseType_t xTaskGetSchedulerState( void )
    {
    BaseType_t xReturn;

        if( xSchedulerRunning == pdFALSE )
        {
            xReturn = taskSCHEDULER_NOT_STARTED;
        }
        else
        {
            if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
            {
                xReturn = taskSCHEDULER_RUNNING;
            }
            else
            {
                xReturn = taskSCHEDULER_SUSPENDED;
            }
        }

        return xReturn;
    }

#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
\r
4106 /*-----------------------------------------------------------*/
\r
#if ( configUSE_MUTEXES == 1 )

    /* Raise the priority of the task holding a mutex (pxMutexHolder) to that
    of the calling task, which is attempting to obtain the mutex.  Returns
    pdTRUE if inheritance took place (or had already taken place), otherwise
    pdFALSE.  Called with the kernel's internal data structures protected. */
    BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
    {
    TCB_t * const pxMutexHolderTCB = pxMutexHolder;
    BaseType_t xReturn = pdFALSE;

        /* If the mutex was given back by an interrupt while the queue was
        locked then the mutex holder might now be NULL.  _RB_ Is this still
        needed as interrupts can no longer use mutexes? */
        if( pxMutexHolder != NULL )
        {
            /* If the holder of the mutex has a priority below the priority of
            the task attempting to obtain the mutex then it will temporarily
            inherit the priority of the task attempting to obtain the mutex. */
            if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority )
            {
                /* Adjust the mutex holder state to account for its new
                priority.  Only reset the event list item value if the value is
                not being used for anything else. */
                if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
                {
                    listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* If the task being modified is in the ready state it will need
                to be moved into a new list. */
                if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
                {
                    if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        /* It is known that the task is in its ready list so
                        there is no need to check again and the port level
                        reset macro can be called directly. */
                        portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Inherit the priority before being moved into the new list. */
                    pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
                    prvAddTaskToReadyList( pxMutexHolderTCB );
                }
                else
                {
                    /* Just inherit the priority. */
                    pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
                }

                traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority );

                /* Inheritance occurred. */
                xReturn = pdTRUE;
            }
            else
            {
                if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority )
                {
                    /* The base priority of the mutex holder is lower than the
                    priority of the task attempting to take the mutex, but the
                    current priority of the mutex holder is not lower than the
                    priority of the task attempting to take the mutex.
                    Therefore the mutex holder must have already inherited a
                    priority, but inheritance would have occurred if that had
                    not been the case. */
                    xReturn = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xReturn;
    }

#endif /* configUSE_MUTEXES */
\r
4196 /*-----------------------------------------------------------*/
\r
#if ( configUSE_MUTEXES == 1 )

    /* Undo priority inheritance when the calling task gives a mutex back.
    The priority is only restored to the base priority once the task holds
    no other mutexes.  Returns pdTRUE if a context switch is required. */
    BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
    {
    TCB_t * const pxTCB = pxMutexHolder;
    BaseType_t xReturn = pdFALSE;

        if( pxMutexHolder != NULL )
        {
            /* A task can only have an inherited priority if it holds the mutex.
            If the mutex is held by a task then it cannot be given from an
            interrupt, and if a mutex is given by the holding task then it must
            be the running state task. */
            configASSERT( pxTCB == pxCurrentTCB );
            configASSERT( pxTCB->uxMutexesHeld );
            ( pxTCB->uxMutexesHeld )--;

            /* Has the holder of the mutex inherited the priority of another
            task? */
            if( pxTCB->uxPriority != pxTCB->uxBasePriority )
            {
                /* Only disinherit if no other mutexes are held. */
                if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
                {
                    /* A task can only have an inherited priority if it holds
                    the mutex.  If the mutex is held by a task then it cannot be
                    given from an interrupt, and if a mutex is given by the
                    holding task then it must be the running state task.  Remove
                    the holding task from the ready/delayed list. */
                    if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        taskRESET_READY_PRIORITY( pxTCB->uxPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Disinherit the priority before adding the task into the
                    new ready list. */
                    traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
                    pxTCB->uxPriority = pxTCB->uxBasePriority;

                    /* Reset the event list item value.  It cannot be in use for
                    any other purpose if this task is running, and it must be
                    running to give back the mutex. */
                    listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                    prvAddTaskToReadyList( pxTCB );

                    /* Return true to indicate that a context switch is required.
                    This is only actually required in the corner case whereby
                    multiple mutexes were held and the mutexes were given back
                    in an order different to that in which they were taken.
                    If a context switch did not occur when the first mutex was
                    returned, even if a task was waiting on it, then a context
                    switch should occur when the last mutex is returned whether
                    a task is waiting on it or not. */
                    xReturn = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xReturn;
    }

#endif /* configUSE_MUTEXES */
\r
4276 /*-----------------------------------------------------------*/
\r
#if ( configUSE_MUTEXES == 1 )

    /* Partially undo priority inheritance when a task that was waiting for a
    mutex times out.  The holder's priority is lowered to the greater of its
    base priority and the priority of the highest priority task still waiting
    for the mutex (uxHighestPriorityWaitingTask). */
    void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask )
    {
    TCB_t * const pxTCB = pxMutexHolder;
    UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
    const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;

        if( pxMutexHolder != NULL )
        {
            /* If pxMutexHolder is not NULL then the holder must hold at least
            one mutex. */
            configASSERT( pxTCB->uxMutexesHeld );

            /* Determine the priority to which the priority of the task that
            holds the mutex should be set.  This will be the greater of the
            holding task's base priority and the priority of the highest
            priority task that is waiting to obtain the mutex. */
            if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
            {
                uxPriorityToUse = uxHighestPriorityWaitingTask;
            }
            else
            {
                uxPriorityToUse = pxTCB->uxBasePriority;
            }

            /* Does the priority need to change? */
            if( pxTCB->uxPriority != uxPriorityToUse )
            {
                /* Only disinherit if no other mutexes are held.  This is a
                simplification in the priority inheritance implementation.  If
                the task that holds the mutex is also holding other mutexes then
                the other mutexes may have caused the priority inheritance. */
                if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
                {
                    /* If a task has timed out because it already holds the
                    mutex it was trying to obtain then it cannot of inherited
                    its own priority. */
                    configASSERT( pxTCB != pxCurrentTCB );

                    /* Disinherit the priority, remembering the previous
                    priority to facilitate determining the subject task's
                    state. */
                    traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
                    uxPriorityUsedOnEntry = pxTCB->uxPriority;
                    pxTCB->uxPriority = uxPriorityToUse;

                    /* Only reset the event list item value if the value is not
                    being used for anything else. */
                    if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
                    {
                        listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* If the running task is not the task that holds the mutex
                    then the task that holds the mutex could be in either the
                    Ready, Blocked or Suspended states.  Only remove the task
                    from its current state list if it is in the Ready state as
                    the task's priority is going to change and there is one
                    Ready list per priority. */
                    if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                    {
                        if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                        {
                            /* It is known that the task is in its ready list so
                            there is no need to check again and the port level
                            reset macro can be called directly. */
                            portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }

                        prvAddTaskToReadyList( pxTCB );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

#endif /* configUSE_MUTEXES */
\r
4381 /*-----------------------------------------------------------*/
\r
#if ( portCRITICAL_NESTING_IN_TCB == 1 )

    /* Enter a critical section, maintaining the nesting count in the current
    TCB so critical sections can nest.  Interrupts stay disabled until the
    matching outermost vTaskExitCritical(). */
    void vTaskEnterCritical( void )
    {
        portDISABLE_INTERRUPTS();

        if( xSchedulerRunning != pdFALSE )
        {
            ( pxCurrentTCB->uxCriticalNesting )++;

            /* This is not the interrupt safe version of the enter critical
            function so assert() if it is being called from an interrupt
            context.  Only API functions that end in "FromISR" can be used in an
            interrupt.  Only assert if the critical nesting count is 1 to
            protect against recursive calls if the assert function also uses a
            critical section. */
            if( pxCurrentTCB->uxCriticalNesting == 1 )
            {
                portASSERT_IF_IN_ISR();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

#endif /* portCRITICAL_NESTING_IN_TCB */
\r
4411 /*-----------------------------------------------------------*/
\r
#if ( portCRITICAL_NESTING_IN_TCB == 1 )

    /* Leave a critical section.  Interrupts are only re-enabled once the
    per-task nesting count maintained by vTaskEnterCritical() unwinds to
    zero. */
    void vTaskExitCritical( void )
    {
        if( xSchedulerRunning != pdFALSE )
        {
            if( pxCurrentTCB->uxCriticalNesting > 0U )
            {
                ( pxCurrentTCB->uxCriticalNesting )--;

                if( pxCurrentTCB->uxCriticalNesting == 0U )
                {
                    portENABLE_INTERRUPTS();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

#endif /* portCRITICAL_NESTING_IN_TCB */
\r
4444 /*-----------------------------------------------------------*/
\r
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )

    /* Copy pcTaskName into pcBuffer, padding with spaces out to
    configMAX_TASK_NAME_LEN - 1 characters so stats tables line up, then
    NUL-terminate.  Returns a pointer to the new terminator so the caller can
    continue writing there.  pcBuffer must have room for
    configMAX_TASK_NAME_LEN bytes. */
    static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName )
    {
    size_t x;

        /* Start by copying the entire string. */
        strcpy( pcBuffer, pcTaskName );

        /* Pad the end of the string with spaces to ensure columns line up when
        printed out. */
        for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ )
        {
            pcBuffer[ x ] = ' ';
        }

        /* Terminate. */
        pcBuffer[ x ] = ( char ) 0x00;

        /* Return the new end of string. */
        return &( pcBuffer[ x ] );
    }

#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
\r
4470 /*-----------------------------------------------------------*/
\r
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    /* Format a human readable table of task name, state, priority, stack
    high water mark and task number into pcWriteBuffer.  The caller must
    supply a buffer large enough for one line per task - the buffer size is
    not checked (sprintf() is used), so this is a demo/debug convenience
    only, not a production API. */
    void vTaskList( char * pcWriteBuffer )
    {
    TaskStatus_t *pxTaskStatusArray;
    UBaseType_t uxArraySize, x;
    char cStatus;

        /*
         * PLEASE NOTE:
         *
         * This function is provided for convenience only, and is used by many
         * of the demo applications.  Do not consider it to be part of the
         * scheduler.
         *
         * vTaskList() calls uxTaskGetSystemState(), then formats part of the
         * uxTaskGetSystemState() output into a human readable table that
         * displays task names, states and stack usage.
         *
         * vTaskList() has a dependency on the sprintf() C library function that
         * might bloat the code size, use a lot of stack, and provide different
         * results on different platforms.  An alternative, tiny, third party,
         * and limited functionality implementation of sprintf() is provided in
         * many of the FreeRTOS/Demo sub-directories in a file called
         * printf-stdarg.c (note printf-stdarg.c does not provide a full
         * snprintf() implementation!).
         *
         * It is recommended that production systems call uxTaskGetSystemState()
         * directly to get access to raw stats data, rather than indirectly
         * through a call to vTaskList().
         */

        /* Make sure the write buffer does not contain a string. */
        *pcWriteBuffer = ( char ) 0x00;

        /* Take a snapshot of the number of tasks in case it changes while this
        function is executing. */
        uxArraySize = uxCurrentNumberOfTasks;

        /* Allocate an array index for each task.  NOTE!  if
        configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
        equate to NULL. */
        pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */

        if( pxTaskStatusArray != NULL )
        {
            /* Generate the (binary) data. */
            uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );

            /* Create a human readable table from the binary data. */
            for( x = 0; x < uxArraySize; x++ )
            {
                switch( pxTaskStatusArray[ x ].eCurrentState )
                {
                    case eRunning:      cStatus = tskRUNNING_CHAR;
                                        break;

                    case eReady:        cStatus = tskREADY_CHAR;
                                        break;

                    case eBlocked:      cStatus = tskBLOCKED_CHAR;
                                        break;

                    case eSuspended:    cStatus = tskSUSPENDED_CHAR;
                                        break;

                    case eDeleted:      cStatus = tskDELETED_CHAR;
                                        break;

                    case eInvalid:      /* Fall through. */
                    default:            /* Should not get here, but it is included
                                        to prevent static checking errors. */
                                        cStatus = ( char ) 0x00;
                                        break;
                }

                /* Write the task name to the string, padding with spaces so it
                can be printed in tabular form more easily. */
                pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );

                /* Write the rest of the string. */
                sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
                pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
            }

            /* Free the array again.  NOTE!  If configSUPPORT_DYNAMIC_ALLOCATION
            is 0 then vPortFree() will be #defined to nothing. */
            vPortFree( pxTaskStatusArray );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
\r
4569 /*----------------------------------------------------------*/
\r
#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    /* Format a human readable table of per-task run time (absolute count and
    whole-number percentage of total) into pcWriteBuffer.  The caller must
    supply a buffer large enough for one line per task - the buffer size is
    not checked (sprintf() is used), so this is a demo/debug convenience
    only, not a production API. */
    void vTaskGetRunTimeStats( char *pcWriteBuffer )
    {
    TaskStatus_t *pxTaskStatusArray;
    UBaseType_t uxArraySize, x;
    uint32_t ulTotalTime, ulStatsAsPercentage;

        #if( configUSE_TRACE_FACILITY != 1 )
        {
            #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
        }
        #endif

        /*
         * PLEASE NOTE:
         *
         * This function is provided for convenience only, and is used by many
         * of the demo applications.  Do not consider it to be part of the
         * scheduler.
         *
         * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
         * of the uxTaskGetSystemState() output into a human readable table that
         * displays the amount of time each task has spent in the Running state
         * in both absolute and percentage terms.
         *
         * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
         * function that might bloat the code size, use a lot of stack, and
         * provide different results on different platforms.  An alternative,
         * tiny, third party, and limited functionality implementation of
         * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
         * a file called printf-stdarg.c (note printf-stdarg.c does not provide
         * a full snprintf() implementation!).
         *
         * It is recommended that production systems call uxTaskGetSystemState()
         * directly to get access to raw stats data, rather than indirectly
         * through a call to vTaskGetRunTimeStats().
         */

        /* Make sure the write buffer does not contain a string. */
        *pcWriteBuffer = ( char ) 0x00;

        /* Take a snapshot of the number of tasks in case it changes while this
        function is executing. */
        uxArraySize = uxCurrentNumberOfTasks;

        /* Allocate an array index for each task.  NOTE!  If
        configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
        equate to NULL. */
        pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */

        if( pxTaskStatusArray != NULL )
        {
            /* Generate the (binary) data. */
            uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );

            /* For percentage calculations. */
            ulTotalTime /= 100UL;

            /* Avoid divide by zero errors. */
            if( ulTotalTime > 0UL )
            {
                /* Create a human readable table from the binary data. */
                for( x = 0; x < uxArraySize; x++ )
                {
                    /* What percentage of the total run time has the task used?
                    This will always be rounded down to the nearest integer.
                    ulTotalRunTimeDiv100 has already been divided by 100. */
                    ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;

                    /* Write the task name to the string, padding with
                    spaces so it can be printed in tabular form more
                    easily. */
                    pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );

                    if( ulStatsAsPercentage > 0UL )
                    {
                        #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
                        {
                            sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
                        }
                        #else
                        {
                            /* sizeof( int ) == sizeof( long ) so a smaller
                            printf() library can be used. */
                            sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
                        }
                        #endif
                    }
                    else
                    {
                        /* If the percentage is zero here then the task has
                        consumed less than 1% of the total run time. */
                        #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
                        {
                            sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
                        }
                        #else
                        {
                            /* sizeof( int ) == sizeof( long ) so a smaller
                            printf() library can be used. */
                            sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
                        }
                        #endif
                    }

                    pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Free the array again.  NOTE!  If configSUPPORT_DYNAMIC_ALLOCATION
            is 0 then vPortFree() will be #defined to nothing. */
            vPortFree( pxTaskStatusArray );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
\r
4696 /*-----------------------------------------------------------*/
\r
4698 TickType_t uxTaskResetEventItemValue( void )
\r
4700 TickType_t uxReturn;
\r
4702 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
\r
4704 /* Reset the event list item to its normal value - so it can be used with
\r
4705 queues and semaphores. */
\r
4706 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
4710 /*-----------------------------------------------------------*/
\r
#if ( configUSE_MUTEXES == 1 )

    /* Record that the calling task has taken one more mutex and return the
    calling task's handle (used by the queue code as the mutex holder). */
    TaskHandle_t pvTaskIncrementMutexHeldCount( void )
    {
        /* If xSemaphoreCreateMutex() is called before any tasks have been created
        then pxCurrentTCB will be NULL. */
        if( pxCurrentTCB != NULL )
        {
            ( pxCurrentTCB->uxMutexesHeld )++;
        }

        return pxCurrentTCB;
    }

#endif /* configUSE_MUTEXES */
\r
4727 /*-----------------------------------------------------------*/
\r
#if( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Use the calling task's notification value as a counting (or, with
    xClearCountOnExit, binary) semaphore: block for up to xTicksToWait ticks
    until the value is non-zero, then either zero it or decrement it.
    Returns the notification value before it was cleared/decremented (zero on
    timeout). */
    uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait )
    {
    uint32_t ulReturn;

        taskENTER_CRITICAL();
        {
            /* Only block if the notification count is not already non-zero. */
            if( pxCurrentTCB->ulNotifiedValue == 0UL )
            {
                /* Mark this task as waiting for a notification. */
                pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;

                if( xTicksToWait > ( TickType_t ) 0 )
                {
                    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
                    traceTASK_NOTIFY_TAKE_BLOCK();

                    /* All ports are written to allow a yield in a critical
                    section (some will yield immediately, others wait until the
                    critical section exits) - but it is not something that
                    application code should ever do. */
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();

        taskENTER_CRITICAL();
        {
            traceTASK_NOTIFY_TAKE();
            ulReturn = pxCurrentTCB->ulNotifiedValue;

            if( ulReturn != 0UL )
            {
                if( xClearCountOnExit != pdFALSE )
                {
                    pxCurrentTCB->ulNotifiedValue = 0UL;
                }
                else
                {
                    pxCurrentTCB->ulNotifiedValue = ulReturn - ( uint32_t ) 1;
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
        }
        taskEXIT_CRITICAL();

        return ulReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
\r
4795 /*-----------------------------------------------------------*/
\r
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* Wait for the calling task to receive a notification, optionally
	clearing bits in the notification value on entry and on exit, and
	optionally outputting the value through pulNotificationValue.  Returns
	pdTRUE if a notification was received (or was already pending), pdFALSE
	if the xTicksToWait block time expired first. */
	BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			/* Only block if a notification is not already pending. */
			if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED )
			{
				/* Clear bits in the task's notification value as bits may get
				set by the notifying task or interrupt.  This can be used to
				clear the value to zero. */
				pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnEntry;

				/* Mark this task as waiting for a notification. */
				pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;

				if( xTicksToWait > ( TickType_t ) 0 )
				{
					prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
					traceTASK_NOTIFY_WAIT_BLOCK();

					/* All ports are written to allow a yield in a critical
					section (some will yield immediately, others wait until the
					critical section exits) - but it is not something that
					application code should ever do. */
					portYIELD_WITHIN_API();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		taskEXIT_CRITICAL();

		taskENTER_CRITICAL();
		{
			traceTASK_NOTIFY_WAIT();

			if( pulNotificationValue != NULL )
			{
				/* Output the current notification value, which may or may not
				have changed while the task was blocked. */
				*pulNotificationValue = pxCurrentTCB->ulNotifiedValue;
			}

			/* If ucNotifyValue is set then either the task never entered the
			blocked state (because a notification was already pending) or the
			task unblocked because of a notification.  Otherwise the task
			unblocked because of a timeout. */
			if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED )
			{
				/* A notification was not received. */
				xReturn = pdFALSE;
			}
			else
			{
				/* A notification was already pending or a notification was
				received while the task was waiting. */
				pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnExit;
				xReturn = pdTRUE;
			}

			pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/
\r
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* Send a notification to xTaskToNotify, updating its notification value
	according to eAction (set bits, increment, overwrite, conditional write,
	or no action).  If pulPreviousNotificationValue is non-NULL the value as
	it was before the update is returned through it.  Unblocks the target
	task if it was waiting for a notification, yielding if the unblocked
	task has the higher priority.  Returns pdPASS except when
	eSetValueWithoutOverwrite could not write because a notification was
	already pending, in which case pdFAIL is returned. */
	BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue )
	{
	TCB_t * pxTCB;
	BaseType_t xReturn = pdPASS;
	uint8_t ucOriginalNotifyState;

		configASSERT( xTaskToNotify );
		pxTCB = xTaskToNotify;

		taskENTER_CRITICAL();
		{
			if( pulPreviousNotificationValue != NULL )
			{
				*pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
			}

			ucOriginalNotifyState = pxTCB->ucNotifyState;

			pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;

			switch( eAction )
			{
				case eSetBits	:
					pxTCB->ulNotifiedValue |= ulValue;
					break;

				case eIncrement	:
					( pxTCB->ulNotifiedValue )++;
					break;

				case eSetValueWithOverwrite	:
					pxTCB->ulNotifiedValue = ulValue;
					break;

				case eSetValueWithoutOverwrite :
					if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
					{
						pxTCB->ulNotifiedValue = ulValue;
					}
					else
					{
						/* The value could not be written to the task. */
						xReturn = pdFAIL;
					}
					break;

				case eNoAction:
					/* The task is being notified without its notify value being
					updated. */
					break;

				default:
					/* Should not get here if all enums are handled.
					Artificially force an assert by testing a value the
					compiler can't assume is const. */
					configASSERT( pxTCB->ulNotifiedValue == ~0UL );

					break;
			}

			traceTASK_NOTIFY();

			/* If the task is in the blocked state specifically to wait for a
			notification then unblock it now. */
			if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
			{
				( void ) uxListRemove( &( pxTCB->xStateListItem ) );
				prvAddTaskToReadyList( pxTCB );

				/* The task should not have been on an event list. */
				configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

				#if( configUSE_TICKLESS_IDLE != 0 )
				{
					/* If a task is blocked waiting for a notification then
					xNextTaskUnblockTime might be set to the blocked task's time
					out time.  If the task is unblocked for a reason other than
					a timeout xNextTaskUnblockTime is normally left unchanged,
					because it will automatically get reset to a new value when
					the tick count equals xNextTaskUnblockTime.  However if
					tickless idling is used it might be more important to enter
					sleep mode at the earliest possible time - so reset
					xNextTaskUnblockTime here to ensure it is updated at the
					earliest possible time. */
					prvResetNextTaskUnblockTime();
				}
				#endif

				if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
				{
					/* The notified task has a priority above the currently
					executing task so a yield is required. */
					taskYIELD_IF_USING_PREEMPTION();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/
\r
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* Interrupt-safe version of xTaskGenericNotify().  Performs the same
	eAction update on the target task's notification value, but if the
	target task must be unblocked while the scheduler is suspended it is
	placed on the pending-ready list instead of the ready list.  Rather than
	yielding directly, *pxHigherPriorityTaskWoken is set to pdTRUE (if the
	pointer is provided) so the ISR can request a context switch on exit. */
	BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken )
	{
	TCB_t * pxTCB;
	uint8_t ucOriginalNotifyState;
	BaseType_t xReturn = pdPASS;
	UBaseType_t uxSavedInterruptStatus;

		configASSERT( xTaskToNotify );

		/* RTOS ports that support interrupt nesting have the concept of a
		maximum	system call (or maximum API call) interrupt priority.
		Interrupts that are	above the maximum system call priority are kept
		permanently enabled, even when the RTOS kernel is in a critical section,
		but cannot make any calls to FreeRTOS API functions.  If configASSERT()
		is defined in FreeRTOSConfig.h then
		portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
		failure if a FreeRTOS API function is called from an interrupt that has
		been assigned a priority above the configured maximum system call
		priority.  Only FreeRTOS functions that end in FromISR can be called
		from interrupts	that have been assigned a priority at or (logically)
		below the maximum system call interrupt priority.  FreeRTOS maintains a
		separate interrupt safe API to ensure interrupt entry is as fast and as
		simple as possible.  More information (albeit Cortex-M specific) is
		provided on the following link:
		http://www.freertos.org/RTOS-Cortex-M3-M4.html */
		portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

		pxTCB = xTaskToNotify;

		uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
		{
			if( pulPreviousNotificationValue != NULL )
			{
				*pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
			}

			ucOriginalNotifyState = pxTCB->ucNotifyState;
			pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;

			switch( eAction )
			{
				case eSetBits	:
					pxTCB->ulNotifiedValue |= ulValue;
					break;

				case eIncrement	:
					( pxTCB->ulNotifiedValue )++;
					break;

				case eSetValueWithOverwrite	:
					pxTCB->ulNotifiedValue = ulValue;
					break;

				case eSetValueWithoutOverwrite :
					if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
					{
						pxTCB->ulNotifiedValue = ulValue;
					}
					else
					{
						/* The value could not be written to the task. */
						xReturn = pdFAIL;
					}
					break;

				case eNoAction :
					/* The task is being notified without its notify value being
					updated. */
					break;

				default:
					/* Should not get here if all enums are handled.
					Artificially force an assert by testing a value the
					compiler can't assume is const. */
					configASSERT( pxTCB->ulNotifiedValue == ~0UL );
					break;
			}

			traceTASK_NOTIFY_FROM_ISR();

			/* If the task is in the blocked state specifically to wait for a
			notification then unblock it now. */
			if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
			{
				/* The task should not have been on an event list. */
				configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

				if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
				{
					( void ) uxListRemove( &( pxTCB->xStateListItem ) );
					prvAddTaskToReadyList( pxTCB );
				}
				else
				{
					/* The delayed and ready lists cannot be accessed, so hold
					this task pending until the scheduler is resumed. */
					vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
				}

				if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
				{
					/* The notified task has a priority above the currently
					executing task so a yield is required. */
					if( pxHigherPriorityTaskWoken != NULL )
					{
						*pxHigherPriorityTaskWoken = pdTRUE;
					}

					/* Mark that a yield is pending in case the user is not
					using the "xHigherPriorityTaskWoken" parameter to an ISR
					safe FreeRTOS function. */
					xYieldPending = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

		return xReturn;
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/
\r
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* Interrupt-safe 'give' of a task notification: increments the target
	task's notification value (lightweight counting-semaphore semantics) and
	unblocks the task if it was waiting for a notification.  If the
	unblocked task has the higher priority, *pxHigherPriorityTaskWoken is
	set to pdTRUE (if the pointer is provided) so the ISR can request a
	context switch on exit. */
	void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken )
	{
	TCB_t * pxTCB;
	uint8_t ucOriginalNotifyState;
	UBaseType_t uxSavedInterruptStatus;

		configASSERT( xTaskToNotify );

		/* RTOS ports that support interrupt nesting have the concept of a
		maximum	system call (or maximum API call) interrupt priority.
		Interrupts that are	above the maximum system call priority are kept
		permanently enabled, even when the RTOS kernel is in a critical section,
		but cannot make any calls to FreeRTOS API functions.  If configASSERT()
		is defined in FreeRTOSConfig.h then
		portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
		failure if a FreeRTOS API function is called from an interrupt that has
		been assigned a priority above the configured maximum system call
		priority.  Only FreeRTOS functions that end in FromISR can be called
		from interrupts	that have been assigned a priority at or (logically)
		below the maximum system call interrupt priority.  FreeRTOS maintains a
		separate interrupt safe API to ensure interrupt entry is as fast and as
		simple as possible.  More information (albeit Cortex-M specific) is
		provided on the following link:
		http://www.freertos.org/RTOS-Cortex-M3-M4.html */
		portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

		pxTCB = xTaskToNotify;

		uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
		{
			ucOriginalNotifyState = pxTCB->ucNotifyState;
			pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;

			/* 'Giving' is equivalent to incrementing a count in a counting
			semaphore. */
			( pxTCB->ulNotifiedValue )++;

			traceTASK_NOTIFY_GIVE_FROM_ISR();

			/* If the task is in the blocked state specifically to wait for a
			notification then unblock it now. */
			if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
			{
				/* The task should not have been on an event list. */
				configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

				if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
				{
					( void ) uxListRemove( &( pxTCB->xStateListItem ) );
					prvAddTaskToReadyList( pxTCB );
				}
				else
				{
					/* The delayed and ready lists cannot be accessed, so hold
					this task pending until the scheduler is resumed. */
					vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
				}

				if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
				{
					/* The notified task has a priority above the currently
					executing task so a yield is required. */
					if( pxHigherPriorityTaskWoken != NULL )
					{
						*pxHigherPriorityTaskWoken = pdTRUE;
					}

					/* Mark that a yield is pending in case the user is not
					using the "xHigherPriorityTaskWoken" parameter in an ISR
					safe FreeRTOS function. */
					xYieldPending = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/
\r
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* Clear the 'notification received' state of xTask (or of the calling
	task if xTask is NULL).  Returns pdPASS if a notification was pending
	and has been cleared, pdFAIL if no notification was pending. */
	BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask )
	{
	TCB_t *pxTCB;
	BaseType_t xReturn;

		/* If null is passed in here then it is the calling task that is having
		its notification state cleared. */
		pxTCB = prvGetTCBFromHandle( xTask );

		taskENTER_CRITICAL();
		{
			if( pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED )
			{
				pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
				xReturn = pdPASS;
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/
\r
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* Clear the bits in ulBitsToClear in the notification value of xTask
	(or of the calling task if xTask is NULL), and return the notification
	value as it was BEFORE the bits were cleared. */
	uint32_t ulTaskNotifyValueClear( TaskHandle_t xTask, uint32_t ulBitsToClear )
	{
	TCB_t *pxTCB;
	uint32_t ulReturn;

		/* If null is passed in here then it is the calling task that is having
		its notification state cleared. */
		pxTCB = prvGetTCBFromHandle( xTask );

		taskENTER_CRITICAL();
		{
			/* Return the notification as it was before the bits were cleared,
			then clear the bit mask.  Note the value read must come from the
			TARGET task (pxTCB) - the original code read pxCurrentTCB here,
			which returned the wrong value whenever xTask was not the calling
			task. */
			ulReturn = pxTCB->ulNotifiedValue;
			pxTCB->ulNotifiedValue &= ~ulBitsToClear;
		}
		taskEXIT_CRITICAL();

		return ulReturn;
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/
\r
#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )

	/* Return the raw run-time counter accumulated by the idle task, in the
	units of the port's run-time stats clock. */
	uint32_t ulTaskGetIdleRunTimeCounter( void )
	{
		return xIdleTaskHandle->ulRunTimeCounter;
	}

#endif
/*-----------------------------------------------------------*/
\r
5273 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely )
\r
5275 TickType_t xTimeToWake;
\r
5276 const TickType_t xConstTickCount = xTickCount;
\r
5278 #if( INCLUDE_xTaskAbortDelay == 1 )
\r
5280 /* About to enter a delayed list, so ensure the ucDelayAborted flag is
\r
5281 reset to pdFALSE so it can be detected as having been set to pdTRUE
\r
5282 when the task leaves the Blocked state. */
\r
5283 pxCurrentTCB->ucDelayAborted = pdFALSE;
\r
5287 /* Remove the task from the ready list before adding it to the blocked list
\r
5288 as the same list item is used for both lists. */
\r
5289 if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
5291 /* The current task must be in a ready list, so there is no need to
\r
5292 check, and the port reset macro can be called directly. */
\r
5293 portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */
\r
5297 mtCOVERAGE_TEST_MARKER();
\r
5300 #if ( INCLUDE_vTaskSuspend == 1 )
\r
5302 if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
\r
5304 /* Add the task to the suspended task list instead of a delayed task
\r
5305 list to ensure it is not woken by a timing event. It will block
\r
5307 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) );
\r
5311 /* Calculate the time at which the task should be woken if the event
\r
5312 does not occur. This may overflow but this doesn't matter, the
\r
5313 kernel will manage it correctly. */
\r
5314 xTimeToWake = xConstTickCount + xTicksToWait;
\r
5316 /* The list item will be inserted in wake time order. */
\r
5317 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
\r
5319 if( xTimeToWake < xConstTickCount )
\r
5321 /* Wake time has overflowed. Place this item in the overflow
\r
5323 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
\r
5327 /* The wake time has not overflowed, so the current block list
\r
5329 vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
\r
5331 /* If the task entering the blocked state was placed at the
\r
5332 head of the list of blocked tasks then xNextTaskUnblockTime
\r
5333 needs to be updated too. */
\r
5334 if( xTimeToWake < xNextTaskUnblockTime )
\r
5336 xNextTaskUnblockTime = xTimeToWake;
\r
5340 mtCOVERAGE_TEST_MARKER();
\r
5345 #else /* INCLUDE_vTaskSuspend */
\r
5347 /* Calculate the time at which the task should be woken if the event
\r
5348 does not occur. This may overflow but this doesn't matter, the kernel
\r
5349 will manage it correctly. */
\r
5350 xTimeToWake = xConstTickCount + xTicksToWait;
\r
5352 /* The list item will be inserted in wake time order. */
\r
5353 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
\r
5355 if( xTimeToWake < xConstTickCount )
\r
5357 /* Wake time has overflowed. Place this item in the overflow list. */
\r
5358 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
\r
5362 /* The wake time has not overflowed, so the current block list is used. */
\r
5363 vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
\r
5365 /* If the task entering the blocked state was placed at the head of the
\r
5366 list of blocked tasks then xNextTaskUnblockTime needs to be updated
\r
5368 if( xTimeToWake < xNextTaskUnblockTime )
\r
5370 xNextTaskUnblockTime = xTimeToWake;
\r
5374 mtCOVERAGE_TEST_MARKER();
\r
5378 /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
\r
5379 ( void ) xCanBlockIndefinitely;
\r
5381 #endif /* INCLUDE_vTaskSuspend */
\r
5384 /* Code below here allows additional code to be inserted into this source file,
\r
5385 especially where access to file scope functions and data is needed (for example
\r
5386 when performing module tests). */
\r
5388 #ifdef FREERTOS_MODULE_TEST
\r
5389 #include "tasks_test_access_functions.h"
\r
5393 #if( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )
\r
5395 #include "freertos_tasks_c_additions.h"
\r
5397 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
\r
5398 static void freertos_tasks_c_additions_init( void )
\r
5400 FREERTOS_TASKS_C_ADDITIONS_INIT();
\r