If tickless idle mode is in use then ensure prvResetNextTaskUnblockTime() is called...
[freertos] / FreeRTOS / Source / tasks.c
index ceb863c588080c414f6c3f9b741b1c9532e4138a..e62b388602fcaec3178b6791d746295fb22f4be7 100644 (file)
@@ -1,6 +1,6 @@
 /*\r
- * FreeRTOS Kernel V10.0.1\r
- * Copyright (C) 2017 Amazon.com, Inc. or its affiliates.  All Rights Reserved.\r
+ * FreeRTOS Kernel V10.2.1\r
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.\r
  *\r
  * Permission is hereby granted, free of charge, to any person obtaining a copy of\r
  * this software and associated documentation files (the "Software"), to deal in\r
@@ -75,24 +75,7 @@ functions but without including stdio.h here. */
  */\r
 #define tskSTACK_FILL_BYTE     ( 0xa5U )\r
 \r
-/* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using\r
-dynamically allocated RAM, in which case when any task is deleted it is known\r
-that both the task's stack and TCB need to be freed.  Sometimes the\r
-FreeRTOSConfig.h settings only allow a task to be created using statically\r
-allocated RAM, in which case when any task is deleted it is known that neither\r
-the task's stack or TCB should be freed.  Sometimes the FreeRTOSConfig.h\r
-settings allow a task to be created using either statically or dynamically\r
-allocated RAM, in which case a member of the TCB is used to record whether the\r
-stack and/or TCB were allocated statically or dynamically, so when a task is\r
-deleted the RAM that was allocated dynamically is freed again and no attempt is\r
-made to free the RAM that was allocated statically.\r
-tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for a\r
-task to be created using either statically or dynamically allocated RAM.  Note\r
-that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created with\r
-a statically allocated stack and a dynamically allocated TCB.\r
-!!!NOTE!!! If the definition of tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is\r
-changed then the definition of StaticTask_t must also be updated. */\r
-#define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE      ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )\r
+/* Bits used to record how a task's stack and TCB were allocated. */\r
 #define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB                 ( ( uint8_t ) 0 )\r
 #define tskSTATICALLY_ALLOCATED_STACK_ONLY                     ( ( uint8_t ) 1 )\r
 #define tskSTATICALLY_ALLOCATED_STACK_AND_TCB          ( ( uint8_t ) 2 )\r
@@ -100,7 +83,7 @@ changed then the definition of StaticTask_t must also be updated. */
 /* If any of the following are set then task stacks are filled with a known\r
 value so the high water mark can be determined.  If none of the following are\r
 set then don't fill the stack so there is no unnecessary dependency on memset. */\r
-#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )\r
+#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )\r
        #define tskSET_NEW_STACKS_TO_KNOWN_VALUE        1\r
 #else\r
        #define tskSET_NEW_STACKS_TO_KNOWN_VALUE        0\r
@@ -266,7 +249,7 @@ to its original value when it is released. */
  * and stores task state information, including a pointer to the task's context\r
  * (the task's run time environment, including register values)\r
  */\r
-typedef struct TaskControlBlock_t\r
+typedef struct tskTaskControlBlock                     /* The old naming convention is used to prevent breaking kernel aware debuggers. */\r
 {\r
        volatile StackType_t    *pxTopOfStack;  /*< Points to the location of the last item placed on the tasks stack.  THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */\r
 \r
@@ -317,7 +300,10 @@ typedef struct TaskControlBlock_t
                responsible for resulting newlib operation.  User must be familiar with\r
                newlib and must provide system-wide implementations of the necessary\r
                stubs. Be warned that (at the time of writing) the current newlib design\r
-               implements a system-wide malloc() that must be provided with locks. */\r
+               implements a system-wide malloc() that must be provided with locks.\r
+\r
+               See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+               for additional information. */\r
                struct  _reent xNewLib_reent;\r
        #endif\r
 \r
@@ -326,7 +312,7 @@ typedef struct TaskControlBlock_t
                volatile uint8_t ucNotifyState;\r
        #endif\r
 \r
-       /* See the comments above the definition of\r
+       /* See the comments in FreeRTOS.h with the definition of\r
        tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */\r
        #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */\r
                uint8_t ucStaticallyAllocated;          /*< Set to pdTRUE if the task is statically allocated to ensure no attempt is made to free the memory. */\r
@@ -336,6 +322,10 @@ typedef struct TaskControlBlock_t
                uint8_t ucDelayAborted;\r
        #endif\r
 \r
+       #if( configUSE_POSIX_ERRNO == 1 )\r
+               int iTaskErrno;\r
+       #endif\r
+\r
 } tskTCB;\r
 \r
 /* The old tskTCB name is maintained above then typedefed to the new TCB_t name\r
@@ -346,8 +336,13 @@ typedef tskTCB TCB_t;
 which static variables must be declared volatile. */\r
 PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;\r
 \r
-/* Lists for ready and blocked tasks. --------------------*/\r
+/* Lists for ready and blocked tasks. --------------------\r
+xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but\r
+doing so breaks some kernel aware debuggers and debuggers that rely on removing\r
+the static qualifier. */\r
 PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];/*< Prioritised ready tasks. */\r
+PRIVILEGED_DATA static List_t xDelayedTaskList1;                                               /*< Delayed tasks. */\r
+PRIVILEGED_DATA static List_t xDelayedTaskList2;                                               /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count). */\r
 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList;                            /*< Points to the delayed task list currently being used. */\r
 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList;            /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */\r
 PRIVILEGED_DATA static List_t xPendingReadyList;                                               /*< Tasks that have been readied while the scheduler was suspended.  They will be moved to the ready list when the scheduler is resumed. */\r
@@ -365,12 +360,18 @@ PRIVILEGED_DATA static List_t xPendingReadyList;                                          /*< Tasks that have been r
 \r
 #endif\r
 \r
+/* Global POSIX errno. Its value is changed upon context switching to match\r
+the errno of the currently running task. */\r
+#if ( configUSE_POSIX_ERRNO == 1 )\r
+       int FreeRTOS_errno = 0;\r
+#endif\r
+\r
 /* Other file private variables. --------------------------------*/\r
 PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks     = ( UBaseType_t ) 0U;\r
 PRIVILEGED_DATA static volatile TickType_t xTickCount                          = ( TickType_t ) configINITIAL_TICK_COUNT;\r
 PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority                 = tskIDLE_PRIORITY;\r
 PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning           = pdFALSE;\r
-PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks                      = ( UBaseType_t ) 0U;\r
+PRIVILEGED_DATA static volatile TickType_t xPendedTicks                        = ( TickType_t ) 0U;\r
 PRIVILEGED_DATA static volatile BaseType_t xYieldPending                       = pdFALSE;\r
 PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows                     = ( BaseType_t ) 0;\r
 PRIVILEGED_DATA static UBaseType_t uxTaskNumber                                        = ( UBaseType_t ) 0U;\r
@@ -387,6 +388,15 @@ when the scheduler is unsuspended.  The pending ready list itself can only be
 accessed from a critical section. */\r
 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended       = ( UBaseType_t ) pdFALSE;\r
 \r
+#if ( configGENERATE_RUN_TIME_STATS == 1 )\r
+\r
+       /* Do not move these variables to function scope as doing so prevents the\r
+       code working with debuggers that need to remove the static qualifier. */\r
+       PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL;     /*< Holds the value of a timer/counter the last time a task was switched in. */\r
+       PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL;           /*< Holds the total amount of execution time as defined by the run time counter clock. */\r
+\r
+#endif\r
+\r
 /*lint -restore */\r
 \r
 /*-----------------------------------------------------------*/\r
@@ -497,7 +507,7 @@ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseT
  * This function determines the 'high water mark' of the task stack by\r
  * determining how much of the stack remains at the original preset value.\r
  */\r
-#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )\r
+#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )\r
 \r
        static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;\r
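This hunk widens the condition that builds prvTaskCheckFreeStackSpace() so the new INCLUDE_uxTaskGetStackHighWaterMark2 option also pulls it in; uxTaskGetStackHighWaterMark2() reports the high water mark as configSTACK_DEPTH_TYPE rather than UBaseType_t.  A minimal usage sketch, assuming INCLUDE_uxTaskGetStackHighWaterMark2 (and, for the delay, INCLUDE_vTaskDelay) are set to 1 in FreeRTOSConfig.h; vStackMonitorTask is an illustrative application task, not part of this change:

#include "FreeRTOS.h"
#include "task.h"

static void vStackMonitorTask( void *pvParameters )
{
configSTACK_DEPTH_TYPE xUnusedStackWords;

	( void ) pvParameters;

	for( ;; )
	{
		/* Passing NULL queries the calling task.  The return value is the
		minimum amount of stack (in words) that has remained unused. */
		xUnusedStackWords = uxTaskGetStackHighWaterMark2( NULL );

		if( xUnusedStackWords < ( configSTACK_DEPTH_TYPE ) 16 )
		{
			/* The stack has come close to overflowing - handle as appropriate. */
		}

		vTaskDelay( pdMS_TO_TICKS( 1000 ) );
	}
}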
 \r
@@ -607,7 +617,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION;
                                task was created statically in case the task is later deleted. */\r
                                pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;\r
                        }\r
-                       #endif /* configSUPPORT_DYNAMIC_ALLOCATION */\r
+                       #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */\r
 \r
                        prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL );\r
                        prvAddNewTaskToReadyList( pxNewTCB );\r
@@ -649,7 +659,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION;
                                task was created statically in case the task is later deleted. */\r
                                pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;\r
                        }\r
-                       #endif /* configSUPPORT_DYNAMIC_ALLOCATION */\r
+                       #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */\r
 \r
                        prvInitialiseNewTask(   pxTaskDefinition->pvTaskCode,\r
                                                                        pxTaskDefinition->pcName,\r
@@ -690,14 +700,14 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION;
                                /* Store the stack location in the TCB. */\r
                                pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;\r
 \r
-                               #if( configSUPPORT_STATIC_ALLOCATION == 1 )\r
+                               #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )\r
                                {\r
                                        /* Tasks can be created statically or dynamically, so note\r
                                        this task had a statically allocated stack in case it is\r
                                        later deleted.  The TCB was allocated dynamically. */\r
                                        pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;\r
                                }\r
-                               #endif\r
+                               #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */\r
 \r
                                prvInitialiseNewTask(   pxTaskDefinition->pvTaskCode,\r
                                                                                pxTaskDefinition->pcName,\r
@@ -794,7 +804,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION;
                                task was created dynamically in case it is later deleted. */\r
                                pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;\r
                        }\r
-                       #endif /* configSUPPORT_STATIC_ALLOCATION */\r
+                       #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */\r
 \r
                        prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );\r
                        prvAddNewTaskToReadyList( pxNewTCB );\r
@@ -837,8 +847,6 @@ UBaseType_t x;
                uxPriority &= ~portPRIVILEGE_BIT;\r
        #endif /* portUSING_MPU_WRAPPERS == 1 */\r
 \r
-       configASSERT( pcName );\r
-\r
        /* Avoid dependency on memset() if it is not required. */\r
        #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )\r
        {\r
@@ -881,26 +889,35 @@ UBaseType_t x;
        #endif /* portSTACK_GROWTH */\r
 \r
        /* Store the task name in the TCB. */\r
-       for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )\r
+       if( pcName != NULL )\r
        {\r
-               pxNewTCB->pcTaskName[ x ] = pcName[ x ];\r
-\r
-               /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than\r
-               configMAX_TASK_NAME_LEN characters just in case the memory after the\r
-               string is not accessible (extremely unlikely). */\r
-               if( pcName[ x ] == ( char ) 0x00 )\r
-               {\r
-                       break;\r
-               }\r
-               else\r
+               for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )\r
                {\r
-                       mtCOVERAGE_TEST_MARKER();\r
+                       pxNewTCB->pcTaskName[ x ] = pcName[ x ];\r
+\r
+                       /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than\r
+                       configMAX_TASK_NAME_LEN characters just in case the memory after the\r
+                       string is not accessible (extremely unlikely). */\r
+                       if( pcName[ x ] == ( char ) 0x00 )\r
+                       {\r
+                               break;\r
+                       }\r
+                       else\r
+                       {\r
+                               mtCOVERAGE_TEST_MARKER();\r
+                       }\r
                }\r
-       }\r
 \r
-       /* Ensure the name string is terminated in the case that the string length\r
-       was greater or equal to configMAX_TASK_NAME_LEN. */\r
-       pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';\r
+               /* Ensure the name string is terminated in the case that the string length\r
+               was greater or equal to configMAX_TASK_NAME_LEN. */\r
+               pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';\r
+       }\r
+       else\r
+       {\r
+               /* The task has not been given a name, so just ensure there is a NULL\r
+               terminator when it is read out. */\r
+               pxNewTCB->pcTaskName[ 0 ] = 0x00;\r
+       }\r
 \r
        /* This is used as an array index so must ensure it's not too large.  First\r
        remove the privilege bit if one is present. */\r
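Because the configASSERT( pcName ) has been removed and the copy loop above is now guarded, a task may be created with a NULL name, which simply leaves the stored name empty.  A sketch of the now-legal call, assuming dynamic allocation is enabled in FreeRTOSConfig.h; vAnonymousTask and vCreateUnnamedTask are illustrative names:

#include "FreeRTOS.h"
#include "task.h"

extern void vAnonymousTask( void *pvParameters ); /* Illustrative task function. */

void vCreateUnnamedTask( void )
{
	/* Passing NULL as the name is now accepted; the TCB stores an empty
	string instead of the kernel asserting. */
	( void ) xTaskCreate( vAnonymousTask,
						  NULL,                      /* No human readable name. */
						  configMINIMAL_STACK_SIZE,  /* Stack depth in words. */
						  NULL,                      /* No task parameter. */
						  tskIDLE_PRIORITY + 1,      /* Priority. */
						  NULL );                    /* Handle not required. */
}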
@@ -979,7 +996,9 @@ UBaseType_t x;
 \r
        #if ( configUSE_NEWLIB_REENTRANT == 1 )\r
        {\r
-               /* Initialise this task's Newlib reent structure. */\r
+               /* Initialise this task's Newlib reent structure.\r
+               See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+               for additional information. */\r
                _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) );\r
        }\r
        #endif\r
@@ -996,11 +1015,49 @@ UBaseType_t x;
        the top of stack variable is updated. */\r
        #if( portUSING_MPU_WRAPPERS == 1 )\r
        {\r
-               pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );\r
+               /* If the port has capability to detect stack overflow,\r
+               pass the stack end address to the stack initialization\r
+               function as well. */\r
+               #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )\r
+               {\r
+                       #if( portSTACK_GROWTH < 0 )\r
+                       {\r
+                               pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged );\r
+                       }\r
+                       #else /* portSTACK_GROWTH */\r
+                       {\r
+                               pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged );\r
+                       }\r
+                       #endif /* portSTACK_GROWTH */\r
+               }\r
+               #else /* portHAS_STACK_OVERFLOW_CHECKING */\r
+               {\r
+                       pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );\r
+               }\r
+               #endif /* portHAS_STACK_OVERFLOW_CHECKING */\r
        }\r
        #else /* portUSING_MPU_WRAPPERS */\r
        {\r
-               pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );\r
+               /* If the port has capability to detect stack overflow,\r
+               pass the stack end address to the stack initialization\r
+               function as well. */\r
+               #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )\r
+               {\r
+                       #if( portSTACK_GROWTH < 0 )\r
+                       {\r
+                               pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );\r
+                       }\r
+                       #else /* portSTACK_GROWTH */\r
+                       {\r
+                               pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );\r
+                       }\r
+                       #endif /* portSTACK_GROWTH */\r
+               }\r
+               #else /* portHAS_STACK_OVERFLOW_CHECKING */\r
+               {\r
+                       pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );\r
+               }\r
+               #endif /* portHAS_STACK_OVERFLOW_CHECKING */\r
        }\r
        #endif /* portUSING_MPU_WRAPPERS */\r
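Ports that define portHAS_STACK_OVERFLOW_CHECKING as 1 receive the other end of the stack as an extra argument, so the port layer can, for example, program a hardware stack limit where one exists.  The matching pxPortInitialiseStack() prototypes in portable.h take roughly the following shape (paraphrased sketch; the portable.h shipped with this kernel version is authoritative):

#if( portUSING_MPU_WRAPPERS == 1 )
	#if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
		StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, StackType_t *pxEndOfStack, TaskFunction_t pxCode, void *pvParameters, BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION;
	#else
		StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters, BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION;
	#endif
#else
	#if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
		StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, StackType_t *pxEndOfStack, TaskFunction_t pxCode, void *pvParameters ) PRIVILEGED_FUNCTION;
	#else
		StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) PRIVILEGED_FUNCTION;
	#endif
#endif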
 \r
@@ -1112,7 +1169,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB )
                        being deleted. */\r
                        pxTCB = prvGetTCBFromHandle( xTaskToDelete );\r
 \r
-                       /* Remove task from the ready list. */\r
+                       /* Remove task from the ready/delayed list. */\r
                        if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )\r
                        {\r
                                taskRESET_READY_PRIORITY( pxTCB->uxPriority );\r
@@ -1152,6 +1209,10 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB )
                                check the xTasksWaitingTermination list. */\r
                                ++uxDeletedTasksWaitingCleanUp;\r
 \r
+                               /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as\r
+                               portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */\r
+                               traceTASK_DELETE( pxTCB );\r
+\r
                                /* The pre-delete hook is primarily for the Windows simulator,\r
                                in which Windows specific clean up operations are performed,\r
                                after which it is not possible to yield away from this task -\r
@@ -1162,14 +1223,13 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB )
                        else\r
                        {\r
                                --uxCurrentNumberOfTasks;\r
+                               traceTASK_DELETE( pxTCB );\r
                                prvDeleteTCB( pxTCB );\r
 \r
                                /* Reset the next expected unblock time in case it referred to\r
                                the task that has just been deleted. */\r
                                prvResetNextTaskUnblockTime();\r
                        }\r
-\r
-                       traceTASK_DELETE( pxTCB );\r
                }\r
                taskEXIT_CRITICAL();\r
 \r
@@ -1321,7 +1381,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB )
 #endif /* INCLUDE_vTaskDelay */\r
 /*-----------------------------------------------------------*/\r
 \r
-#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) )\r
+#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )\r
 \r
        eTaskState eTaskGetState( TaskHandle_t xTask )\r
        {\r
@@ -1989,7 +2049,9 @@ BaseType_t xReturn;
                #if ( configUSE_NEWLIB_REENTRANT == 1 )\r
                {\r
                        /* Switch Newlib's _impure_ptr variable to point to the _reent\r
-                       structure specific to the task that will run first. */\r
+                       structure specific to the task that will run first.\r
+                       See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+                       for additional information. */\r
                        _impure_ptr = &( pxCurrentTCB->xNewLib_reent );\r
                }\r
                #endif /* configUSE_NEWLIB_REENTRANT */\r
@@ -2052,6 +2114,7 @@ void vTaskSuspendAll( void )
        post in the FreeRTOS support forum before reporting this as a bug! -\r
        http://goo.gl/wu4acr */\r
        ++uxSchedulerSuspended;\r
+       portMEMORY_BARRIER();\r
 }\r
 /*----------------------------------------------------------*/\r
 \r
@@ -2122,6 +2185,7 @@ BaseType_t xTaskResumeAll( void )
 {\r
 TCB_t *pxTCB = NULL;\r
 BaseType_t xAlreadyYielded = pdFALSE;\r
+TickType_t xTicksToNextUnblockTime;\r
 \r
        /* If uxSchedulerSuspended is zero then this function does not match a\r
        previous call to vTaskSuspendAll(). */\r
@@ -2176,30 +2240,51 @@ BaseType_t xAlreadyYielded = pdFALSE;
                                they should be processed now.  This ensures the tick count does\r
                                not     slip, and that any delayed tasks are resumed at the correct\r
                                time. */\r
+                               while( xPendedTicks > ( TickType_t ) 0 )\r
                                {\r
-                                       UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. */\r
+                                       /* Calculate how far into the future the next task will\r
+                                       leave the Blocked state because its timeout expired.  If\r
+                                       there are no tasks due to leave the blocked state between\r
+                                       the time now and the time at which the tick count overflows\r
+                                       then xNextTaskUnblockTime will be the tick overflow time.\r
+                                       This means xNextTaskUnblockTime can never be less than\r
+                                       xTickCount, and the following can therefore not\r
+                                       underflow. */\r
+                                       configASSERT( xNextTaskUnblockTime >= xTickCount );\r
+                                       xTicksToNextUnblockTime = xNextTaskUnblockTime - xTickCount;\r
 \r
-                                       if( uxPendedCounts > ( UBaseType_t ) 0U )\r
+                                       /* Don't want to move the tick count more than the number\r
+                                       of ticks that are pending, so cap if necessary. */\r
+                                       if( xTicksToNextUnblockTime > xPendedTicks )\r
                                        {\r
-                                               do\r
-                                               {\r
-                                                       if( xTaskIncrementTick() != pdFALSE )\r
-                                                       {\r
-                                                               xYieldPending = pdTRUE;\r
-                                                       }\r
-                                                       else\r
-                                                       {\r
-                                                               mtCOVERAGE_TEST_MARKER();\r
-                                                       }\r
-                                                       --uxPendedCounts;\r
-                                               } while( uxPendedCounts > ( UBaseType_t ) 0U );\r
+                                               xTicksToNextUnblockTime = xPendedTicks;\r
+                                       }\r
 \r
-                                               uxPendedTicks = 0;\r
+                                       if( xTicksToNextUnblockTime == 0 )\r
+                                       {\r
+                                               /* xTicksToNextUnblockTime could be zero if the tick\r
+                                               count is about to overflow and xTicksToNextUnblockTime\r
+                                               holds the time at which the tick count will overflow\r
+                                               (rather than the time at which the next task will\r
+                                               unblock).  Set to 1 otherwise xPendedTicks won't be\r
+                                               decremented below. */\r
+                                               xTicksToNextUnblockTime = ( TickType_t ) 1;\r
                                        }\r
-                                       else\r
+                                       else if( xTicksToNextUnblockTime > ( TickType_t ) 1 )\r
                                        {\r
-                                               mtCOVERAGE_TEST_MARKER();\r
+                                               /* Move the tick count one short of the next unblock\r
+                                               time, then call xTaskIncrementTick() to move the tick\r
+                                               count up to the next unblock time to unblock the task,\r
+                                               if any.  This will also swap the blocked task and\r
+                                               overflow blocked task lists if necessary. */\r
+                                               xTickCount += ( xTicksToNextUnblockTime - ( TickType_t ) 1 );\r
                                        }\r
+                                       xYieldPending |= xTaskIncrementTick();\r
+\r
+                                       /* Adjust for the number of ticks just added to\r
+                                       xTickCount and go around the loop again if\r
+                                       xPendedTicks is still greater than 0. */\r
+                                       xPendedTicks -= xTicksToNextUnblockTime;\r
                                }\r
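As an illustration of the loop above (numbers chosen arbitrarily): with xTickCount at 90, xNextTaskUnblockTime at 100 and xPendedTicks at 25, the first pass computes xTicksToNextUnblockTime as 10, adds 9 directly to xTickCount and lets xTaskIncrementTick() supply the tenth tick (unblocking the waiting task and recalculating xNextTaskUnblockTime), leaving xPendedTicks at 15 for the next pass; the loop exits once xPendedTicks reaches 0.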
 \r
                                if( xYieldPending != pdFALSE )\r
@@ -2533,6 +2618,109 @@ implementations require configUSE_TICKLESS_IDLE to be set to a value other than
 #endif /* configUSE_TICKLESS_IDLE */\r
 /*----------------------------------------------------------*/\r
 \r
+BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )\r
+{\r
+BaseType_t xYieldRequired = pdFALSE;\r
+\r
+       /* Must not be called with the scheduler suspended as the implementation\r
+       relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */\r
+       configASSERT( uxSchedulerSuspended == 0 );\r
+\r
+       /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when\r
+       the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */\r
+       vTaskSuspendAll();\r
+       xPendedTicks += xTicksToCatchUp;\r
+       xYieldRequired = xTaskResumeAll();\r
+\r
+       return xYieldRequired;\r
+}\r
+/*----------------------------------------------------------*/\r
+\r
+#if ( INCLUDE_xTaskAbortDelay == 1 )\r
+\r
+       BaseType_t xTaskAbortDelayFromISR( TaskHandle_t xTask, BaseType_t * const pxHigherPriorityTaskWoken )\r
+       {\r
+       TCB_t *pxTCB = xTask;\r
+       BaseType_t xReturn;\r
+       UBaseType_t uxSavedInterruptStatus;\r
+\r
+               configASSERT( pxTCB );\r
+\r
+               /* RTOS ports that support interrupt nesting have the concept of a maximum\r
+               system call (or maximum API call) interrupt priority.  Interrupts that are\r
+               above the maximum system call priority are kept permanently enabled, even\r
+               when the RTOS kernel is in a critical section, but cannot make any calls to\r
+               FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
+               then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
+               failure if a FreeRTOS API function is called from an interrupt that has been\r
+               assigned a priority above the configured maximum system call priority.\r
+               Only FreeRTOS functions that end in FromISR can be called from interrupts\r
+               that have been assigned a priority at or (logically) below the maximum\r
+               system call     interrupt priority.  FreeRTOS maintains a separate interrupt\r
+               safe API to ensure interrupt entry is as fast and as simple as possible.\r
+               More information (albeit Cortex-M specific) is provided on the following\r
+               link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
+               portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
+\r
+               uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
+               {\r
+                       /* A task can only be prematurely removed from the Blocked state if\r
+                       it is actually in the Blocked state. */\r
+                       if( eTaskGetState( xTask ) == eBlocked )\r
+                       {\r
+                               xReturn = pdPASS;\r
+\r
+                               /* Remove the reference to the task from the blocked list.  A higher\r
+                               priority interrupt won't touch the xStateListItem because of the\r
+                               critical section. */\r
+                               ( void ) uxListRemove( &( pxTCB->xStateListItem ) );\r
+\r
+                               /* Is the task waiting on an event also?  If so remove it from\r
+                               the event list too. */\r
+                               if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )\r
+                               {\r
+                                       ( void ) uxListRemove( &( pxTCB->xEventListItem ) );\r
+\r
+                                       /* This lets the task know it was forcibly removed from the\r
+                                       blocked state so it should not re-evaluate its block time and\r
+                                       then block again. */\r
+                                       pxTCB->ucDelayAborted = pdTRUE;\r
+                               }\r
+                               else\r
+                               {\r
+                                       mtCOVERAGE_TEST_MARKER();\r
+                               }\r
+\r
+                               /* Place the unblocked task into the appropriate ready list. */\r
+                               prvAddTaskToReadyList( pxTCB );\r
+\r
+                               if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )\r
+                               {\r
+                                       if( pxHigherPriorityTaskWoken != NULL )\r
+                                       {\r
+                                               /* Pend the yield to be performed when the scheduler\r
+                                               is unsuspended. */\r
+                                               *pxHigherPriorityTaskWoken = pdTRUE;\r
+                                       }\r
+                               }\r
+                               else\r
+                               {\r
+                                       mtCOVERAGE_TEST_MARKER();\r
+                               }\r
+                       }\r
+                       else\r
+                       {\r
+                               xReturn = pdFAIL;\r
+                       }\r
+               }\r
+               portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
+\r
+               return xReturn;\r
+       }\r
+\r
+#endif\r
+/*----------------------------------------------------------*/\r
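Typical call sites for the two functions added above are xTaskCatchUpTicks() from application code after the tick interrupt has been suppressed, and xTaskAbortDelayFromISR() from an interrupt that must wake a blocked task early.  A sketch only: the handler names, the xWorkerTask handle and the portYIELD_FROM_ISR() usage are illustrative, and INCLUDE_xTaskAbortDelay must be 1 for the ISR variant to exist:

#include "FreeRTOS.h"
#include "task.h"

extern TaskHandle_t xWorkerTask; /* Illustrative handle created elsewhere. */

void vOnExitFromStopMode( uint32_t ulTicksSuppressed ) /* Illustrative hook. */
{
	/* Tell the kernel how many tick interrupts were missed so delayed tasks
	unblock at the right time.  Must not be called while the scheduler is
	suspended. */
	if( xTaskCatchUpTicks( ( TickType_t ) ulTicksSuppressed ) != pdFALSE )
	{
		taskYIELD(); /* A task was unblocked that should run now. */
	}
}

void vExampleInterruptHandler( void ) /* Illustrative ISR. */
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;

	/* Force xWorkerTask out of the Blocked state before its timeout expires. */
	if( xTaskAbortDelayFromISR( xWorkerTask, &xHigherPriorityTaskWoken ) == pdPASS )
	{
		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}
}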
+\r
 #if ( INCLUDE_xTaskAbortDelay == 1 )\r
 \r
        BaseType_t xTaskAbortDelay( TaskHandle_t xTask )\r
@@ -2564,6 +2752,10 @@ implementations require configUSE_TICKLESS_IDLE to be set to a value other than
                                        if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )\r
                                        {\r
                                                ( void ) uxListRemove( &( pxTCB->xEventListItem ) );\r
+\r
+                                               /* This lets the task know it was forcibly removed from the\r
+                                               blocked state so it should not re-evaluate its block time and\r
+                                               then block again. */\r
                                                pxTCB->ucDelayAborted = pdTRUE;\r
                                        }\r
                                        else\r
@@ -2740,7 +2932,7 @@ BaseType_t xSwitchRequired = pdFALSE;
                {\r
                        /* Guard against the tick hook being called when the pended tick\r
                        count is being unwound (when the scheduler is being unlocked). */\r
-                       if( uxPendedTicks == ( UBaseType_t ) 0U )\r
+                       if( xPendedTicks == ( TickType_t ) 0 )\r
                        {\r
                                vApplicationTickHook();\r
                        }\r
@@ -2750,10 +2942,23 @@ BaseType_t xSwitchRequired = pdFALSE;
                        }\r
                }\r
                #endif /* configUSE_TICK_HOOK */\r
+\r
+               #if ( configUSE_PREEMPTION == 1 )\r
+               {\r
+                       if( xYieldPending != pdFALSE )\r
+                       {\r
+                               xSwitchRequired = pdTRUE;\r
+                       }\r
+                       else\r
+                       {\r
+                               mtCOVERAGE_TEST_MARKER();\r
+                       }\r
+               }\r
+               #endif /* configUSE_PREEMPTION */\r
        }\r
        else\r
        {\r
-               ++uxPendedTicks;\r
+               ++xPendedTicks;\r
 \r
                /* The tick hook gets called at regular intervals, even if the\r
                scheduler is locked. */\r
@@ -2764,19 +2969,6 @@ BaseType_t xSwitchRequired = pdFALSE;
                #endif\r
        }\r
 \r
-       #if ( configUSE_PREEMPTION == 1 )\r
-       {\r
-               if( xYieldPending != pdFALSE )\r
-               {\r
-                       xSwitchRequired = pdTRUE;\r
-               }\r
-               else\r
-               {\r
-                       mtCOVERAGE_TEST_MARKER();\r
-               }\r
-       }\r
-       #endif /* configUSE_PREEMPTION */\r
-\r
        return xSwitchRequired;\r
 }\r
 /*-----------------------------------------------------------*/\r
@@ -2814,24 +3006,17 @@ BaseType_t xSwitchRequired = pdFALSE;
 \r
        TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )\r
        {\r
-       TCB_t *xTCB;\r
+       TCB_t *pxTCB;\r
        TaskHookFunction_t xReturn;\r
 \r
-               /* If xTask is NULL then we are setting our own task hook. */\r
-               if( xTask == NULL )\r
-               {\r
-                       xTCB = ( TCB_t * ) pxCurrentTCB;\r
-               }\r
-               else\r
-               {\r
-                       xTCB = xTask;\r
-               }\r
+               /* If xTask is NULL then read the calling task's hook. */\r
+               pxTCB = prvGetTCBFromHandle( xTask );\r
 \r
                /* Save the hook function in the TCB.  A critical section is required as\r
                the value can be accessed from an interrupt. */\r
                taskENTER_CRITICAL();\r
                {\r
-                       xReturn = xTCB->pxTaskTag;\r
+                       xReturn = pxTCB->pxTaskTag;\r
                }\r
                taskEXIT_CRITICAL();\r
 \r
@@ -2841,6 +3026,31 @@ BaseType_t xSwitchRequired = pdFALSE;
 #endif /* configUSE_APPLICATION_TASK_TAG */\r
 /*-----------------------------------------------------------*/\r
 \r
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )\r
+\r
+       TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )\r
+       {\r
+       TCB_t *pxTCB;\r
+       TaskHookFunction_t xReturn;\r
+       UBaseType_t uxSavedInterruptStatus;\r
+\r
+               /* If xTask is NULL then read the calling task's hook. */\r
+               pxTCB = prvGetTCBFromHandle( xTask );\r
+\r
+               /* Read the hook function from the TCB.  Interrupts are masked as\r
+               the value can also be accessed from a task or another interrupt. */\r
+               uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
+               {\r
+                       xReturn = pxTCB->pxTaskTag;\r
+               }\r
+               portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
+\r
+               return xReturn;\r
+       }\r
+\r
+#endif /* configUSE_APPLICATION_TASK_TAG */\r
+/*-----------------------------------------------------------*/\r
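The ISR-safe getter added above mirrors xTaskGetApplicationTaskTag() but masks interrupts instead of entering a critical section, so the tag can be read from a trace macro or interrupt handler.  A sketch, assuming configUSE_APPLICATION_TASK_TAG is 1 and the tag was set earlier with vTaskSetApplicationTaskTag(); the handler name is illustrative:

#include "FreeRTOS.h"
#include "task.h"

void vExampleTraceISR( void ) /* Illustrative interrupt handler. */
{
TaskHookFunction_t xTag;

	/* Passing NULL reads the tag of the task that was interrupted. */
	xTag = xTaskGetApplicationTaskTagFromISR( NULL );

	if( xTag != NULL )
	{
		/* The tag is an application defined value stored in the TCB - for
		example an identifier that could be written to a trace port here. */
	}
}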
+\r
 #if ( configUSE_APPLICATION_TASK_TAG == 1 )\r
 \r
        BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )\r
@@ -2888,46 +3098,59 @@ void vTaskSwitchContext( void )
 \r
                #if ( configGENERATE_RUN_TIME_STATS == 1 )\r
                {\r
-                       PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL;     /*< Holds the value of a timer/counter the last time a task was switched in. */\r
-                       PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL;           /*< Holds the total amount of execution time as defined by the run time counter clock. */\r
-\r
-                               #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE\r
-                                       portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );\r
-                               #else\r
-                                       ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();\r
-                               #endif\r
+                       #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE\r
+                               portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );\r
+                       #else\r
+                               ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();\r
+                       #endif\r
 \r
-                               /* Add the amount of time the task has been running to the\r
-                               accumulated time so far.  The time the task started running was\r
-                               stored in ulTaskSwitchedInTime.  Note that there is no overflow\r
-                               protection here so count values are only valid until the timer\r
-                               overflows.  The guard against negative values is to protect\r
-                               against suspect run time stat counter implementations - which\r
-                               are provided by the application, not the kernel. */\r
-                               if( ulTotalRunTime > ulTaskSwitchedInTime )\r
-                               {\r
-                                       pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime );\r
-                               }\r
-                               else\r
-                               {\r
-                                       mtCOVERAGE_TEST_MARKER();\r
-                               }\r
-                               ulTaskSwitchedInTime = ulTotalRunTime;\r
+                       /* Add the amount of time the task has been running to the\r
+                       accumulated time so far.  The time the task started running was\r
+                       stored in ulTaskSwitchedInTime.  Note that there is no overflow\r
+                       protection here so count values are only valid until the timer\r
+                       overflows.  The guard against negative values is to protect\r
+                       against suspect run time stat counter implementations - which\r
+                       are provided by the application, not the kernel. */\r
+                       if( ulTotalRunTime > ulTaskSwitchedInTime )\r
+                       {\r
+                               pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime );\r
+                       }\r
+                       else\r
+                       {\r
+                               mtCOVERAGE_TEST_MARKER();\r
+                       }\r
+                       ulTaskSwitchedInTime = ulTotalRunTime;\r
                }\r
                #endif /* configGENERATE_RUN_TIME_STATS */\r
 \r
                /* Check for stack overflow, if configured. */\r
                taskCHECK_FOR_STACK_OVERFLOW();\r
 \r
+               /* Before the currently running task is switched out, save its errno. */\r
+               #if( configUSE_POSIX_ERRNO == 1 )\r
+               {\r
+                       pxCurrentTCB->iTaskErrno = FreeRTOS_errno;\r
+               }\r
+               #endif\r
+\r
                /* Select a new task to run using either the generic C or port\r
                optimised asm code. */\r
                taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */\r
                traceTASK_SWITCHED_IN();\r
 \r
+               /* After the new task is switched in, update the global errno. */\r
+               #if( configUSE_POSIX_ERRNO == 1 )\r
+               {\r
+                       FreeRTOS_errno = pxCurrentTCB->iTaskErrno;\r
+               }\r
+               #endif\r
+\r
                #if ( configUSE_NEWLIB_REENTRANT == 1 )\r
                {\r
                        /* Switch Newlib's _impure_ptr variable to point to the _reent\r
-                       structure specific to this task. */\r
+                       structure specific to this task.\r
+                       See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+                       for additional information. */\r
                        _impure_ptr = &( pxCurrentTCB->xNewLib_reent );\r
                }\r
                #endif /* configUSE_NEWLIB_REENTRANT */\r
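/* Illustrative sketch (not part of this diff): the _impure_ptr switch above is
only compiled in when the application opts in to a per-task Newlib reentrancy
structure in FreeRTOSConfig.h. */
#define configUSE_NEWLIB_REENTRANT    1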
@@ -3035,6 +3258,20 @@ BaseType_t xReturn;
        {\r
                ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );\r
                prvAddTaskToReadyList( pxUnblockedTCB );\r
+\r
+               #if( configUSE_TICKLESS_IDLE != 0 )\r
+               {\r
+                       /* If a task is blocked on a kernel object then xNextTaskUnblockTime\r
+                       might be set to the blocked task's time out time.  If the task is\r
+                       unblocked for a reason other than a timeout xNextTaskUnblockTime is\r
+                       normally left unchanged, because it is automatically reset to a new\r
+                       value when the tick count equals xNextTaskUnblockTime.  However if\r
+                       tickless idling is used it might be more important to enter sleep mode\r
+                       at the earliest possible time - so reset xNextTaskUnblockTime here to\r
+                       ensure it is updated at the earliest possible time. */\r
+                       prvResetNextTaskUnblockTime();\r
+               }\r
+               #endif\r
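/* Illustrative sketch (not part of this diff): the early reset of
xNextTaskUnblockTime above only applies when tickless idle is enabled in
FreeRTOSConfig.h, for example as below.  configEXPECTED_IDLE_TIME_BEFORE_SLEEP
is optional and is believed to default to 2 ticks. */
#define configUSE_TICKLESS_IDLE                   1
#define configEXPECTED_IDLE_TIME_BEFORE_SLEEP     2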
        }\r
        else\r
        {\r
@@ -3059,20 +3296,6 @@ BaseType_t xReturn;
                xReturn = pdFALSE;\r
        }\r
 \r
-       #if( configUSE_TICKLESS_IDLE != 0 )\r
-       {\r
-               /* If a task is blocked on a kernel object then xNextTaskUnblockTime\r
-               might be set to the blocked task's time out time.  If the task is\r
-               unblocked for a reason other than a timeout xNextTaskUnblockTime is\r
-               normally left unchanged, because it is automatically reset to a new\r
-               value when the tick count equals xNextTaskUnblockTime.  However if\r
-               tickless idling is used it might be more important to enter sleep mode\r
-               at the earliest possible time - so reset xNextTaskUnblockTime here to\r
-               ensure it is updated at the earliest possible time. */\r
-               prvResetNextTaskUnblockTime();\r
-       }\r
-       #endif\r
-\r
        return xReturn;\r
 }\r
 /*-----------------------------------------------------------*/\r
@@ -3094,6 +3317,20 @@ TCB_t *pxUnblockedTCB;
        configASSERT( pxUnblockedTCB );\r
        ( void ) uxListRemove( pxEventListItem );\r
 \r
+       #if( configUSE_TICKLESS_IDLE != 0 )\r
+       {\r
+               /* If a task is blocked on a kernel object then xNextTaskUnblockTime\r
+               might be set to the blocked task's time out time.  If the task is\r
+               unblocked for a reason other than a timeout xNextTaskUnblockTime is\r
+               normally left unchanged, because it is automatically reset to a new\r
+               value when the tick count equals xNextTaskUnblockTime.  However if\r
+               tickless idling is used it might be more important to enter sleep mode\r
+               at the earliest possible time - so reset xNextTaskUnblockTime here to\r
+               ensure it is updated at the earliest possible time. */\r
+               prvResetNextTaskUnblockTime();\r
+       }\r
+       #endif\r
+\r
        /* Remove the task from the delayed list and add it to the ready list.  The\r
        scheduler is suspended so interrupts will not be accessing the ready\r
        lists. */\r
@@ -3260,7 +3497,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
        /* In case a task that has a secure context deletes itself, in which case\r
        the idle task is responsible for deleting the task's secure context, if\r
        any. */\r
-       portTASK_CALLS_SECURE_FUNCTIONS();\r
+       portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );\r
 \r
        for( ;; )\r
        {\r
@@ -3374,6 +3611,8 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
        const UBaseType_t uxNonApplicationTasks = 1;\r
        eSleepModeStatus eReturn = eStandardSleep;\r
 \r
+               /* This function must be called from a critical section. */\r
+\r
                if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )\r
                {\r
                        /* A task was made ready while the scheduler was suspended. */\r
@@ -3464,8 +3703,6 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 static void prvInitialiseTaskLists( void )\r
 {\r
 UBaseType_t uxPriority;\r
-PRIVILEGED_DATA static List_t xDelayedTaskList1;       /*< Delayed tasks. */\r
-PRIVILEGED_DATA static List_t xDelayedTaskList2;       /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */\r
 \r
        for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )\r
        {\r
@@ -3653,7 +3890,7 @@ static void prvCheckTasksWaitingTermination( void )
 #endif /* configUSE_TRACE_FACILITY */\r
 /*-----------------------------------------------------------*/\r
 \r
-#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) )\r
+#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )\r
 \r
        static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )\r
        {\r
@@ -3670,7 +3907,47 @@ static void prvCheckTasksWaitingTermination( void )
                return ( configSTACK_DEPTH_TYPE ) ulCount;\r
        }\r
 \r
-#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) */\r
+#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */\r
+/*-----------------------------------------------------------*/\r
+\r
+#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )\r
+\r
+       /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the\r
+       same except for their return type.  Using configSTACK_DEPTH_TYPE allows the\r
+       user to determine the return type.  It gets around the problem of the value\r
+       overflowing on 8-bit types without breaking backward compatibility for\r
+       applications that expect an 8-bit return type. */\r
+       configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )\r
+       {\r
+       TCB_t *pxTCB;\r
+       uint8_t *pucEndOfStack;\r
+       configSTACK_DEPTH_TYPE uxReturn;\r
+\r
+               /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are\r
+               the same except for their return type.  Using configSTACK_DEPTH_TYPE\r
+               allows the user to determine the return type.  It gets around the\r
+               problem of the value overflowing on 8-bit types without breaking\r
+               backward compatibility for applications that expect an 8-bit return\r
+               type. */\r
+\r
+               pxTCB = prvGetTCBFromHandle( xTask );\r
+\r
+               #if portSTACK_GROWTH < 0\r
+               {\r
+                       pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;\r
+               }\r
+               #else\r
+               {\r
+                       pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;\r
+               }\r
+               #endif\r
+\r
+               uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );\r
+\r
+               return uxReturn;\r
+       }\r
+\r
+#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */\r
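/* Illustrative sketch (not part of this diff): using the new
uxTaskGetStackHighWaterMark2() API added above.  vCheckOwnStack() is a
hypothetical application function, and configSTACK_DEPTH_TYPE is believed to
default to uint16_t when left undefined. */

/* In FreeRTOSConfig.h. */
#define INCLUDE_uxTaskGetStackHighWaterMark2    1
#define configSTACK_DEPTH_TYPE                  uint32_t

/* In application code. */
static void vCheckOwnStack( void )
{
configSTACK_DEPTH_TYPE uxUnusedWords;

	/* Passing NULL queries the calling task; the returned value is the
	smallest amount of stack that has remained unused, in words. */
	uxUnusedWords = uxTaskGetStackHighWaterMark2( NULL );
	configASSERT( uxUnusedWords > ( configSTACK_DEPTH_TYPE ) 10 );
}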
 /*-----------------------------------------------------------*/\r
 \r
 #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )\r
@@ -3711,7 +3988,9 @@ static void prvCheckTasksWaitingTermination( void )
                portCLEAN_UP_TCB( pxTCB );\r
 \r
                /* Free up the memory allocated by the scheduler for the task.  It is up\r
-               to the task to free any memory allocated at the application level. */\r
+               to the task to free any memory allocated at the application level.\r
+               See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+               for additional information. */\r
                #if ( configUSE_NEWLIB_REENTRANT == 1 )\r
                {\r
                        _reclaim_reent( &( pxTCB->xNewLib_reent ) );\r
@@ -3861,7 +4140,10 @@ TCB_t *pxTCB;
                                {\r
                                        if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )\r
                                        {\r
-                                               taskRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority );\r
+                                               /* It is known that the task is in its ready list so\r
+                                               there is no need to check again and the port level\r
+                                               reset macro can be called directly. */\r
+                                               portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority );\r
                                        }\r
                                        else\r
                                        {\r
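/* Illustrative sketch (not part of this diff): the relationship between the
two macros referenced above, approximately as tasks.c defines them when
configUSE_PORT_OPTIMISED_TASK_SELECTION is 1.  taskRESET_READY_PRIORITY() only
clears the priority's bit when the corresponding ready list is empty; in the
change above uxListRemove() has already reported the list as empty, so the
port level macro is called directly. */
#define taskRESET_READY_PRIORITY( uxPriority ) \
{ \
	if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
	{ \
		portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \
	} \
}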
@@ -3941,7 +4223,7 @@ TCB_t *pxTCB;
                                        the mutex.  If the mutex is held by a task then it cannot be\r
                                        given from an interrupt, and if a mutex is given by the\r
                                        holding task then it must be the running state task.  Remove\r
-                                       the holding task from the ready list. */\r
+                                       the holding task from the ready/delayed list. */\r
                                        if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )\r
                                        {\r
                                                taskRESET_READY_PRIORITY( pxTCB->uxPriority );\r
@@ -4062,7 +4344,10 @@ TCB_t *pxTCB;
                                        {\r
                                                if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )\r
                                                {\r
-                                                       taskRESET_READY_PRIORITY( pxTCB->uxPriority );\r
+                                                       /* It is known that the task is in its ready list so\r
+                                                       there is no need to check again and the port level\r
+                                                       reset macro can be called directly. */\r
+                                                       portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );\r
                                                }\r
                                                else\r
                                                {\r
@@ -4916,7 +5201,6 @@ TickType_t uxReturn;
        }\r
 \r
 #endif /* configUSE_TASK_NOTIFICATIONS */\r
-\r
 /*-----------------------------------------------------------*/\r
 \r
 #if( configUSE_TASK_NOTIFICATIONS == 1 )\r
@@ -4950,6 +5234,41 @@ TickType_t uxReturn;
 #endif /* configUSE_TASK_NOTIFICATIONS */\r
 /*-----------------------------------------------------------*/\r
 \r
+#if( configUSE_TASK_NOTIFICATIONS == 1 )\r
+\r
+       uint32_t ulTaskNotifyValueClear( TaskHandle_t xTask, uint32_t ulBitsToClear )\r
+       {\r
+       TCB_t *pxTCB;\r
+       uint32_t ulReturn;\r
+\r
+               /* If null is passed in here then it is the calling task that is having\r
+               its notification state cleared. */\r
+               pxTCB = prvGetTCBFromHandle( xTask );\r
+\r
+               taskENTER_CRITICAL();\r
+               {\r
+                       /* Return the notification as it was before the bits were cleared,\r
+                       then clear the bit mask. */\r
+                       ulReturn = pxTCB->ulNotifiedValue;\r
+                       pxTCB->ulNotifiedValue &= ~ulBitsToClear;\r
+               }\r
+               taskEXIT_CRITICAL();\r
+\r
+               return ulReturn;\r
+       }\r
+\r
+#endif /* configUSE_TASK_NOTIFICATIONS */\r
+/*-----------------------------------------------------------*/\r
+\r
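/* Illustrative sketch (not part of this diff): typical use of the new
ulTaskNotifyValueClear() API added above.  vExampleClearNotification() is a
hypothetical application function.  Passing NULL operates on the calling
task's notification value; the return value is the notification value as it
was before the requested bits were cleared. */
void vExampleClearNotification( void )
{
uint32_t ulPreviousValue;

	/* Clear bit 0 of this task's notification value. */
	ulPreviousValue = ulTaskNotifyValueClear( NULL, 0x01UL );
	( void ) ulPreviousValue;
}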
+#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )\r
+\r
+       uint32_t ulTaskGetIdleRunTimeCounter( void )\r
+       {\r
+               return xIdleTaskHandle->ulRunTimeCounter;\r
+       }\r
+\r
+#endif\r
+/*-----------------------------------------------------------*/\r
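/* Illustrative sketch (not part of this diff): a rough CPU load estimate built
on the new ulTaskGetIdleRunTimeCounter() API added above.  It requires both
configGENERATE_RUN_TIME_STATS and INCLUDE_xTaskGetIdleTaskHandle to be set to
1, the result is only meaningful until the run time counter overflows, and
ulApproxCpuLoadPercent() is a hypothetical application function. */
uint32_t ulApproxCpuLoadPercent( void )
{
uint32_t ulTotalTime, ulIdleTime;
uint64_t ullIdlePercent;

	ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE();
	ulIdleTime = ulTaskGetIdleRunTimeCounter();

	if( ulTotalTime == 0UL )
	{
		return 0UL;
	}

	/* 64-bit intermediate avoids overflow in the multiplication, and the
	result is clamped in case of sampling skew between the two reads. */
	ullIdlePercent = ( ( uint64_t ) ulIdleTime * 100ULL ) / ulTotalTime;

	if( ullIdlePercent > 100ULL )
	{
		ullIdlePercent = 100ULL;
	}

	/* Percentage of time NOT spent in the idle task. */
	return ( uint32_t ) ( 100ULL - ullIdlePercent );
}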
 \r
 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely )\r
 {\r