/*\r
- * FreeRTOS Kernel V10.1.1\r
- * Copyright (C) 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\r
+ * FreeRTOS Kernel V10.2.1\r
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\r
*\r
* Permission is hereby granted, free of charge, to any person obtaining a copy of\r
* this software and associated documentation files (the "Software"), to deal in\r
*/\r
#define tskSTACK_FILL_BYTE ( 0xa5U )\r
\r
-/* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using\r
-dynamically allocated RAM, in which case when any task is deleted it is known\r
-that both the task's stack and TCB need to be freed. Sometimes the\r
-FreeRTOSConfig.h settings only allow a task to be created using statically\r
-allocated RAM, in which case when any task is deleted it is known that neither\r
-the task's stack or TCB should be freed. Sometimes the FreeRTOSConfig.h\r
-settings allow a task to be created using either statically or dynamically\r
-allocated RAM, in which case a member of the TCB is used to record whether the\r
-stack and/or TCB were allocated statically or dynamically, so when a task is\r
-deleted the RAM that was allocated dynamically is freed again and no attempt is\r
-made to free the RAM that was allocated statically.\r
-tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for a\r
-task to be created using either statically or dynamically allocated RAM. Note\r
-that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created with\r
-a statically allocated stack and a dynamically allocated TCB.\r
-!!!NOTE!!! If the definition of tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is\r
-changed then the definition of StaticTask_t must also be updated. */\r
-#define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )\r
+/* Bits used to record how a task's stack and TCB were allocated. */
#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )\r
#define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )\r
#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )\r
responsible for resulting newlib operation. User must be familiar with\r
newlib and must provide system-wide implementations of the necessary\r
stubs. Be warned that (at the time of writing) the current newlib design\r
- implements a system-wide malloc() that must be provided with locks. */\r
+ implements a system-wide malloc() that must be provided with locks.\r
+\r
+ See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+ for additional information. */\r
struct _reent xNewLib_reent;\r
#endif\r
\r
volatile uint8_t ucNotifyState;\r
#endif\r
\r
- /* See the comments above the definition of\r
+ /* See the comments in FreeRTOS.h with the definition of\r
tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */\r
#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */\r
uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */\r
PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;\r
PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;\r
PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;\r
-PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U;\r
+PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U;\r
PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE;\r
PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;\r
PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;\r
task was created statically in case the task is later deleted. */\r
pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;\r
}\r
- #endif /* configSUPPORT_DYNAMIC_ALLOCATION */\r
+ #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */\r
\r
prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL );\r
prvAddNewTaskToReadyList( pxNewTCB );\r
task was created statically in case the task is later deleted. */\r
pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;\r
}\r
- #endif /* configSUPPORT_DYNAMIC_ALLOCATION */\r
+ #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */\r
\r
prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,\r
pxTaskDefinition->pcName,\r
/* Store the stack location in the TCB. */\r
pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;\r
\r
- #if( configSUPPORT_STATIC_ALLOCATION == 1 )\r
+ #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )\r
{\r
/* Tasks can be created statically or dynamically, so note\r
this task had a statically allocated stack in case it is\r
later deleted. The TCB was allocated dynamically. */\r
pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;\r
}\r
- #endif\r
+ #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */\r
\r
prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,\r
pxTaskDefinition->pcName,\r
task was created dynamically in case it is later deleted. */\r
pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;\r
}\r
- #endif /* configSUPPORT_STATIC_ALLOCATION */\r
+ #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */\r
\r
prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );\r
prvAddNewTaskToReadyList( pxNewTCB );\r
\r
#if ( configUSE_NEWLIB_REENTRANT == 1 )\r
{\r
- /* Initialise this task's Newlib reent structure. */\r
+ /* Initialise this task's Newlib reent structure.\r
+ See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+ for additional information. */\r
_REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) );\r
}\r
#endif\r
the top of stack variable is updated. */\r
#if( portUSING_MPU_WRAPPERS == 1 )\r
{\r
- pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );\r
+ /* If the port has capability to detect stack overflow,\r
+ pass the stack end address to the stack initialization\r
+ function as well. */\r
+ #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )\r
+ {\r
+ #if( portSTACK_GROWTH < 0 )\r
+ {\r
+ pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged );\r
+ }\r
+ #else /* portSTACK_GROWTH */\r
+ {\r
+ pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged );\r
+ }\r
+ #endif /* portSTACK_GROWTH */\r
+ }\r
+ #else /* portHAS_STACK_OVERFLOW_CHECKING */\r
+ {\r
+ pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );\r
+ }\r
+ #endif /* portHAS_STACK_OVERFLOW_CHECKING */\r
}\r
#else /* portUSING_MPU_WRAPPERS */\r
{\r
- pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );\r
+ /* If the port has capability to detect stack overflow,\r
+ pass the stack end address to the stack initialization\r
+ function as well. */\r
+ #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )\r
+ {\r
+ #if( portSTACK_GROWTH < 0 )\r
+ {\r
+ pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );\r
+ }\r
+ #else /* portSTACK_GROWTH */\r
+ {\r
+ pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );\r
+ }\r
+ #endif /* portSTACK_GROWTH */\r
+ }\r
+ #else /* portHAS_STACK_OVERFLOW_CHECKING */\r
+ {\r
+ pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );\r
+ }\r
+ #endif /* portHAS_STACK_OVERFLOW_CHECKING */\r
}\r
#endif /* portUSING_MPU_WRAPPERS */\r
\r
being deleted. */\r
pxTCB = prvGetTCBFromHandle( xTaskToDelete );\r
\r
- /* Remove task from the ready list. */\r
+ /* Remove task from the ready/delayed list. */\r
if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )\r
{\r
taskRESET_READY_PRIORITY( pxTCB->uxPriority );\r
check the xTasksWaitingTermination list. */\r
++uxDeletedTasksWaitingCleanUp;\r
\r
+ /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as\r
+ portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */\r
+ traceTASK_DELETE( pxTCB );\r
+\r
/* The pre-delete hook is primarily for the Windows simulator,\r
in which Windows specific clean up operations are performed,\r
after which it is not possible to yield away from this task -\r
else\r
{\r
--uxCurrentNumberOfTasks;\r
+ traceTASK_DELETE( pxTCB );\r
prvDeleteTCB( pxTCB );\r
\r
/* Reset the next expected unblock time in case it referred to\r
the task that has just been deleted. */\r
prvResetNextTaskUnblockTime();\r
}\r
-\r
- traceTASK_DELETE( pxTCB );\r
}\r
taskEXIT_CRITICAL();\r
\r
#endif /* INCLUDE_vTaskDelay */\r
/*-----------------------------------------------------------*/\r
\r
-#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) )\r
+#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )\r
\r
eTaskState eTaskGetState( TaskHandle_t xTask )\r
{\r
#if ( configUSE_NEWLIB_REENTRANT == 1 )\r
{\r
/* Switch Newlib's _impure_ptr variable to point to the _reent\r
- structure specific to the task that will run first. */\r
+ structure specific to the task that will run first.\r
+ See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+ for additional information. */\r
_impure_ptr = &( pxCurrentTCB->xNewLib_reent );\r
}\r
#endif /* configUSE_NEWLIB_REENTRANT */\r
post in the FreeRTOS support forum before reporting this as a bug! -\r
http://goo.gl/wu4acr */\r
++uxSchedulerSuspended;\r
+ portMEMORY_BARRIER();\r
}\r
/*----------------------------------------------------------*/\r
\r
{\r
TCB_t *pxTCB = NULL;\r
BaseType_t xAlreadyYielded = pdFALSE;\r
+TickType_t xTicksToNextUnblockTime;\r
\r
/* If uxSchedulerSuspended is zero then this function does not match a\r
previous call to vTaskSuspendAll(). */\r
they should be processed now. This ensures the tick count does\r
not slip, and that any delayed tasks are resumed at the correct\r
time. */\r
+ while( xPendedTicks > ( TickType_t ) 0 )\r
{\r
- UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. */\r
+ /* Calculate how far into the future the next task will\r
+ leave the Blocked state because its timeout expired. If\r
+ there are no tasks due to leave the blocked state between\r
+ the time now and the time at which the tick count overflows\r
+				then xNextTaskUnblockTime will be the tick overflow time.
+ This means xNextTaskUnblockTime can never be less than\r
+ xTickCount, and the following can therefore not\r
+ underflow. */\r
+ configASSERT( xNextTaskUnblockTime >= xTickCount );\r
+ xTicksToNextUnblockTime = xNextTaskUnblockTime - xTickCount;\r
\r
- if( uxPendedCounts > ( UBaseType_t ) 0U )\r
+ /* Don't want to move the tick count more than the number\r
+ of ticks that are pending, so cap if necessary. */\r
+ if( xTicksToNextUnblockTime > xPendedTicks )\r
{\r
- do\r
- {\r
- if( xTaskIncrementTick() != pdFALSE )\r
- {\r
- xYieldPending = pdTRUE;\r
- }\r
- else\r
- {\r
- mtCOVERAGE_TEST_MARKER();\r
- }\r
- --uxPendedCounts;\r
- } while( uxPendedCounts > ( UBaseType_t ) 0U );\r
+ xTicksToNextUnblockTime = xPendedTicks;\r
+ }\r
\r
- uxPendedTicks = 0;\r
+ if( xTicksToNextUnblockTime == 0 )\r
+ {\r
+ /* xTicksToNextUnblockTime could be zero if the tick\r
+					count is about to overflow and xTicksToNextUnblockTime
+ holds the time at which the tick count will overflow\r
+ (rather than the time at which the next task will\r
+ unblock). Set to 1 otherwise xPendedTicks won't be\r
+ decremented below. */\r
+ xTicksToNextUnblockTime = ( TickType_t ) 1;\r
}\r
- else\r
+ else if( xTicksToNextUnblockTime > ( TickType_t ) 1 )\r
{\r
- mtCOVERAGE_TEST_MARKER();\r
+ /* Move the tick count one short of the next unblock\r
+ time, then call xTaskIncrementTick() to move the tick\r
+ count up to the next unblock time to unblock the task,\r
+ if any. This will also swap the blocked task and\r
+ overflow blocked task lists if necessary. */\r
+ xTickCount += ( xTicksToNextUnblockTime - ( TickType_t ) 1 );\r
}\r
+ xYieldPending |= xTaskIncrementTick();\r
+\r
+ /* Adjust for the number of ticks just added to\r
+ xTickCount and go around the loop again if\r
+				xPendedTicks is still greater than 0. */
+ xPendedTicks -= xTicksToNextUnblockTime;\r
}\r
\r
if( xYieldPending != pdFALSE )\r
#endif /* configUSE_TICKLESS_IDLE */\r
/*----------------------------------------------------------*/\r
\r
+/* Advance the kernel's notion of time by xTicksToCatchUp ticks.  The ticks
+are queued in xPendedTicks while the scheduler is suspended and are actually
+processed inside xTaskResumeAll(), which returns pdTRUE if processing them
+unblocked a task that requires a context switch - that value is passed back
+to the caller as xYieldRequired. */
+BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
+{
+BaseType_t xYieldRequired = pdFALSE;
+
+	/* Must not be called with the scheduler suspended as the implementation
+	relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
+	configASSERT( uxSchedulerSuspended == 0 );
+
+	/* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
+	the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
+	vTaskSuspendAll();
+	xPendedTicks += xTicksToCatchUp;
+	xYieldRequired = xTaskResumeAll();
+
+	return xYieldRequired;
+}
+/*----------------------------------------------------------*/\r
+\r
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+	/* Force a task out of the Blocked state from an ISR, before its timeout
+	expires.  Returns pdPASS if xTask was in the Blocked state and has been
+	moved to a ready list, pdFAIL if it was not in the Blocked state.  If the
+	unblocked task has a higher priority than the currently running task then
+	*pxHigherPriorityTaskWoken (when non-NULL) is set to pdTRUE so the ISR can
+	request a context switch on exit. */
+	BaseType_t xTaskAbortDelayFromISR( TaskHandle_t xTask, BaseType_t * const pxHigherPriorityTaskWoken )
+	{
+	TCB_t *pxTCB = xTask;
+	BaseType_t xReturn;
+	UBaseType_t uxSavedInterruptStatus;
+
+		configASSERT( pxTCB );
+
+		/* RTOS ports that support interrupt nesting have the concept of a maximum
+		system call (or maximum API call) interrupt priority.  Interrupts that are
+		above the maximum system call priority are kept permanently enabled, even
+		when the RTOS kernel is in a critical section, but cannot make any calls to
+		FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
+		then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+		failure if a FreeRTOS API function is called from an interrupt that has been
+		assigned a priority above the configured maximum system call priority.
+		Only FreeRTOS functions that end in FromISR can be called from interrupts
+		that have been assigned a priority at or (logically) below the maximum
+		system call interrupt priority.  FreeRTOS maintains a separate interrupt
+		safe API to ensure interrupt entry is as fast and as simple as possible.
+		More information (albeit Cortex-M specific) is provided on the following
+		link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
+		portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+		uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+		{
+			/* A task can only be prematurely removed from the Blocked state if
+			it is actually in the Blocked state. */
+			if( eTaskGetState( xTask ) == eBlocked )
+			{
+				xReturn = pdPASS;
+
+				/* Remove the reference to the task from the blocked list.  A higher
+				priority interrupt won't touch the xStateListItem because of the
+				critical section. */
+				( void ) uxListRemove( &( pxTCB->xStateListItem ) );
+
+				/* Is the task waiting on an event also?  If so remove it from
+				the event list too. */
+				if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
+				{
+					( void ) uxListRemove( &( pxTCB->xEventListItem ) );
+
+					/* This lets the task know it was forcibly removed from the
+					blocked state so it should not re-evaluate its block time and
+					then block again. */
+					pxTCB->ucDelayAborted = pdTRUE;
+				}
+				else
+				{
+					mtCOVERAGE_TEST_MARKER();
+				}
+
+				/* Place the unblocked task into the appropriate ready list. */
+				prvAddTaskToReadyList( pxTCB );
+
+				if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
+				{
+					if( pxHigherPriorityTaskWoken != NULL )
+					{
+						/* Pend the yield to be performed when the scheduler
+						is unsuspended. */
+						*pxHigherPriorityTaskWoken = pdTRUE;
+					}
+				}
+				else
+				{
+					mtCOVERAGE_TEST_MARKER();
+				}
+			}
+			else
+			{
+				xReturn = pdFAIL;
+			}
+		}
+		portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+
+		return xReturn;
+	}
+
+#endif
+/*----------------------------------------------------------*/\r
+\r
#if ( INCLUDE_xTaskAbortDelay == 1 )\r
\r
BaseType_t xTaskAbortDelay( TaskHandle_t xTask )\r
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )\r
{\r
( void ) uxListRemove( &( pxTCB->xEventListItem ) );\r
+\r
+ /* This lets the task know it was forcibly removed from the\r
+ blocked state so it should not re-evaluate its block time and\r
+ then block again. */\r
pxTCB->ucDelayAborted = pdTRUE;\r
}\r
else\r
{\r
/* Guard against the tick hook being called when the pended tick\r
count is being unwound (when the scheduler is being unlocked). */\r
- if( uxPendedTicks == ( UBaseType_t ) 0U )\r
+ if( xPendedTicks == ( TickType_t ) 0 )\r
{\r
vApplicationTickHook();\r
}\r
}\r
}\r
#endif /* configUSE_TICK_HOOK */\r
+\r
+ #if ( configUSE_PREEMPTION == 1 )\r
+ {\r
+ if( xYieldPending != pdFALSE )\r
+ {\r
+ xSwitchRequired = pdTRUE;\r
+ }\r
+ else\r
+ {\r
+ mtCOVERAGE_TEST_MARKER();\r
+ }\r
+ }\r
+ #endif /* configUSE_PREEMPTION */\r
}\r
else\r
{\r
- ++uxPendedTicks;\r
+ ++xPendedTicks;\r
\r
/* The tick hook gets called at regular intervals, even if the\r
scheduler is locked. */\r
#endif\r
}\r
\r
- #if ( configUSE_PREEMPTION == 1 )\r
- {\r
- if( xYieldPending != pdFALSE )\r
- {\r
- xSwitchRequired = pdTRUE;\r
- }\r
- else\r
- {\r
- mtCOVERAGE_TEST_MARKER();\r
- }\r
- }\r
- #endif /* configUSE_PREEMPTION */\r
-\r
return xSwitchRequired;\r
}\r
/*-----------------------------------------------------------*/\r
#if ( configUSE_NEWLIB_REENTRANT == 1 )\r
{\r
/* Switch Newlib's _impure_ptr variable to point to the _reent\r
- structure specific to this task. */\r
+ structure specific to this task.\r
+ See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+ for additional information. */\r
_impure_ptr = &( pxCurrentTCB->xNewLib_reent );\r
}\r
#endif /* configUSE_NEWLIB_REENTRANT */\r
configASSERT( pxUnblockedTCB );\r
( void ) uxListRemove( pxEventListItem );\r
\r
+ #if( configUSE_TICKLESS_IDLE != 0 )\r
+ {\r
+ /* If a task is blocked on a kernel object then xNextTaskUnblockTime\r
+ might be set to the blocked task's time out time. If the task is\r
+ unblocked for a reason other than a timeout xNextTaskUnblockTime is\r
+ normally left unchanged, because it is automatically reset to a new\r
+ value when the tick count equals xNextTaskUnblockTime. However if\r
+ tickless idling is used it might be more important to enter sleep mode\r
+ at the earliest possible time - so reset xNextTaskUnblockTime here to\r
+ ensure it is updated at the earliest possible time. */\r
+ prvResetNextTaskUnblockTime();\r
+ }\r
+ #endif\r
+\r
/* Remove the task from the delayed list and add it to the ready list. The\r
scheduler is suspended so interrupts will not be accessing the ready\r
lists. */\r
/* In case a task that has a secure context deletes itself, in which case\r
the idle task is responsible for deleting the task's secure context, if\r
any. */\r
- portTASK_CALLS_SECURE_FUNCTIONS();\r
+ portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );\r
\r
for( ;; )\r
{\r
const UBaseType_t uxNonApplicationTasks = 1;\r
eSleepModeStatus eReturn = eStandardSleep;\r
\r
+ /* This function must be called from a critical section. */\r
+\r
if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )\r
{\r
/* A task was made ready while the scheduler was suspended. */\r
portCLEAN_UP_TCB( pxTCB );\r
\r
/* Free up the memory allocated by the scheduler for the task. It is up\r
- to the task to free any memory allocated at the application level. */\r
+ to the task to free any memory allocated at the application level.\r
+ See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+ for additional information. */\r
#if ( configUSE_NEWLIB_REENTRANT == 1 )\r
{\r
_reclaim_reent( &( pxTCB->xNewLib_reent ) );\r
{\r
if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )\r
{\r
- taskRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority );\r
+ /* It is known that the task is in its ready list so\r
+ there is no need to check again and the port level\r
+ reset macro can be called directly. */\r
+ portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority );\r
}\r
else\r
{\r
the mutex. If the mutex is held by a task then it cannot be\r
given from an interrupt, and if a mutex is given by the\r
holding task then it must be the running state task. Remove\r
- the holding task from the ready list. */\r
+ the holding task from the ready/delayed list. */\r
if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )\r
{\r
taskRESET_READY_PRIORITY( pxTCB->uxPriority );\r
{\r
if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )\r
{\r
- taskRESET_READY_PRIORITY( pxTCB->uxPriority );\r
+ /* It is known that the task is in its ready list so\r
+ there is no need to check again and the port level\r
+ reset macro can be called directly. */\r
+ portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );\r
}\r
else\r
{\r
}\r
\r
#endif /* configUSE_TASK_NOTIFICATIONS */\r
-\r
/*-----------------------------------------------------------*/\r
\r
#if( configUSE_TASK_NOTIFICATIONS == 1 )\r
#endif /* configUSE_TASK_NOTIFICATIONS */\r
/*-----------------------------------------------------------*/\r
\r
+#if( configUSE_TASK_NOTIFICATIONS == 1 )
+
+	/* Clear the bits in ulBitsToClear from the notification value of the task
+	referenced by xTask (the calling task when xTask is NULL) and return the
+	notification value as it was before the bits were cleared. */
+	uint32_t ulTaskNotifyValueClear( TaskHandle_t xTask, uint32_t ulBitsToClear )
+	{
+	TCB_t *pxTCB;
+	uint32_t ulReturn;
+
+		/* If null is passed in here then it is the calling task that is having
+		its notification state cleared. */
+		pxTCB = prvGetTCBFromHandle( xTask );
+
+		taskENTER_CRITICAL();
+		{
+			/* Return the notification as it was before the bits were cleared,
+			then clear the bit mask.  Note the value must be read from pxTCB,
+			the task the caller identified - reading pxCurrentTCB here would
+			return the wrong task's value whenever xTask is not NULL. */
+			ulReturn = pxTCB->ulNotifiedValue;
+			pxTCB->ulNotifiedValue &= ~ulBitsToClear;
+		}
+		taskEXIT_CRITICAL();
+
+		return ulReturn;
+	}
+
+#endif /* configUSE_TASK_NOTIFICATIONS */
+/*-----------------------------------------------------------*/\r
+\r
+#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
+
+	/* Return the run time counter accumulated by the idle task, as recorded
+	in the idle task's TCB. */
+	uint32_t ulTaskGetIdleRunTimeCounter( void )
+	{
+	uint32_t ulIdleRunTime;
+
+		ulIdleRunTime = xIdleTaskHandle->ulRunTimeCounter;
+
+		return ulIdleRunTime;
+	}
+
+#endif
+/*-----------------------------------------------------------*/\r
\r
static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely )\r
{\r