responsible for resulting newlib operation. User must be familiar with\r
newlib and must provide system-wide implementations of the necessary\r
stubs. Be warned that (at the time of writing) the current newlib design\r
- implements a system-wide malloc() that must be provided with locks. */\r
+ implements a system-wide malloc() that must be provided with locks.\r
+\r
+ See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+ for additional information. */\r
struct _reent xNewLib_reent;\r
#endif\r
\r
\r
#if ( configUSE_NEWLIB_REENTRANT == 1 )\r
{\r
- /* Initialise this task's Newlib reent structure. */\r
+ /* Initialise this task's Newlib reent structure.\r
+ See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+ for additional information. */\r
_REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) );\r
}\r
#endif\r
check the xTasksWaitingTermination list. */\r
++uxDeletedTasksWaitingCleanUp;\r
\r
+ /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as\r
+ portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */\r
+ traceTASK_DELETE( pxTCB );\r
+\r
/* The pre-delete hook is primarily for the Windows simulator,\r
in which Windows specific clean up operations are performed,\r
after which it is not possible to yield away from this task -\r
else\r
{\r
--uxCurrentNumberOfTasks;\r
+ traceTASK_DELETE( pxTCB );\r
prvDeleteTCB( pxTCB );\r
\r
/* Reset the next expected unblock time in case it referred to\r
the task that has just been deleted. */\r
prvResetNextTaskUnblockTime();\r
}\r
-\r
- traceTASK_DELETE( pxTCB );\r
}\r
taskEXIT_CRITICAL();\r
\r
#if ( configUSE_NEWLIB_REENTRANT == 1 )\r
{\r
/* Switch Newlib's _impure_ptr variable to point to the _reent\r
- structure specific to the task that will run first. */\r
+ structure specific to the task that will run first.\r
+ See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+ for additional information. */\r
_impure_ptr = &( pxCurrentTCB->xNewLib_reent );\r
}\r
#endif /* configUSE_NEWLIB_REENTRANT */\r
relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */\r
configASSERT( uxSchedulerSuspended == 0 );\r
\r
- /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occuring when\r
+ /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when\r
the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */\r
vTaskSuspendAll();\r
xPendedTicks += xTicksToCatchUp;\r
}\r
/*----------------------------------------------------------*/\r
\r
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+ /* A version of xTaskAbortDelay() that can be called from an interrupt
+ service routine.  If xTask is currently in the Blocked state it is removed
+ from the blocked list (and from any event list it is also waiting on) and
+ placed on the ready list, and pdPASS is returned; otherwise pdFAIL is
+ returned.  If the unblocked task has a priority above the currently
+ running task then *pxHigherPriorityTaskWoken is set to pdTRUE (only when
+ pxHigherPriorityTaskWoken is not NULL).  NOTE(review): no context switch
+ is performed here - the caller is responsible for requesting a yield on
+ interrupt exit if *pxHigherPriorityTaskWoken was set. */
+ BaseType_t xTaskAbortDelayFromISR( TaskHandle_t xTask, BaseType_t * const pxHigherPriorityTaskWoken )
+ {
+ TCB_t *pxTCB = xTask;
+ BaseType_t xReturn;
+ UBaseType_t uxSavedInterruptStatus;
+
+ configASSERT( pxTCB );
+
+ /* RTOS ports that support interrupt nesting have the concept of a maximum
+ system call (or maximum API call) interrupt priority. Interrupts that are
+ above the maximum system call priority are kept permanently enabled, even
+ when the RTOS kernel is in a critical section, but cannot make any calls to
+ FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
+ then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+ failure if a FreeRTOS API function is called from an interrupt that has been
+ assigned a priority above the configured maximum system call priority.
+ Only FreeRTOS functions that end in FromISR can be called from interrupts
+ that have been assigned a priority at or (logically) below the maximum
+ system call interrupt priority. FreeRTOS maintains a separate interrupt
+ safe API to ensure interrupt entry is as fast and as simple as possible.
+ More information (albeit Cortex-M specific) is provided on the following
+ link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
+ portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+ uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+ {
+ /* A task can only be prematurely removed from the Blocked state if
+ it is actually in the Blocked state. */
+ if( eTaskGetState( xTask ) == eBlocked )
+ {
+ xReturn = pdPASS;
+
+ /* Remove the reference to the task from the blocked list. A higher
+ priority interrupt won't touch the xStateListItem because of the
+ critical section. */
+ ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
+
+ /* Is the task waiting on an event also? If so remove it from
+ the event list too. */
+ if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
+ {
+ ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
+
+ /* This lets the task know it was forcibly removed from the
+ blocked state so it should not re-evaluate its block time and
+ then block again. */
+ pxTCB->ucDelayAborted = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ /* Place the unblocked task into the appropriate ready list. */
+ prvAddTaskToReadyList( pxTCB );
+
+ if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
+ {
+ if( pxHigherPriorityTaskWoken != NULL )
+ {
+ /* Pend the yield to be performed when the scheduler
+ is unsuspended. */
+ *pxHigherPriorityTaskWoken = pdTRUE;
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ /* The task was not in the Blocked state, so there is nothing to
+ abort. */
+ xReturn = pdFAIL;
+ }
+ }
+ portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+
+ return xReturn;
+ }
+
+#endif
+/*----------------------------------------------------------*/\r
+\r
#if ( INCLUDE_xTaskAbortDelay == 1 )\r
\r
BaseType_t xTaskAbortDelay( TaskHandle_t xTask )\r
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )\r
{\r
( void ) uxListRemove( &( pxTCB->xEventListItem ) );\r
+\r
+ /* This lets the task know it was forcibly removed from the\r
+ blocked state so it should not re-evaluate its block time and\r
+ then block again. */\r
pxTCB->ucDelayAborted = pdTRUE;\r
}\r
else\r
}\r
}\r
#endif /* configUSE_TICK_HOOK */\r
+\r
+ #if ( configUSE_PREEMPTION == 1 )\r
+ {\r
+ if( xYieldPending != pdFALSE )\r
+ {\r
+ xSwitchRequired = pdTRUE;\r
+ }\r
+ else\r
+ {\r
+ mtCOVERAGE_TEST_MARKER();\r
+ }\r
+ }\r
+ #endif /* configUSE_PREEMPTION */\r
}\r
else\r
{\r
#endif\r
}\r
\r
- #if ( configUSE_PREEMPTION == 1 )\r
- {\r
- if( xYieldPending != pdFALSE )\r
- {\r
- xSwitchRequired = pdTRUE;\r
- }\r
- else\r
- {\r
- mtCOVERAGE_TEST_MARKER();\r
- }\r
- }\r
- #endif /* configUSE_PREEMPTION */\r
-\r
return xSwitchRequired;\r
}\r
/*-----------------------------------------------------------*/\r
#if ( configUSE_NEWLIB_REENTRANT == 1 )\r
{\r
/* Switch Newlib's _impure_ptr variable to point to the _reent\r
- structure specific to this task. */\r
+ structure specific to this task.\r
+ See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+ for additional information. */\r
_impure_ptr = &( pxCurrentTCB->xNewLib_reent );\r
}\r
#endif /* configUSE_NEWLIB_REENTRANT */\r
configASSERT( pxUnblockedTCB );\r
( void ) uxListRemove( pxEventListItem );\r
\r
+ #if( configUSE_TICKLESS_IDLE != 0 )\r
+ {\r
+ /* If a task is blocked on a kernel object then xNextTaskUnblockTime\r
+ might be set to the blocked task's time out time. If the task is\r
+ unblocked for a reason other than a timeout xNextTaskUnblockTime is\r
+ normally left unchanged, because it is automatically reset to a new\r
+ value when the tick count equals xNextTaskUnblockTime. However if\r
+ tickless idling is used it might be more important to enter sleep mode\r
+ at the earliest possible time - so reset xNextTaskUnblockTime here to\r
+ ensure it is updated at the earliest possible time. */\r
+ prvResetNextTaskUnblockTime();\r
+ }\r
+ #endif\r
+\r
/* Remove the task from the delayed list and add it to the ready list. The\r
scheduler is suspended so interrupts will not be accessing the ready\r
lists. */\r
const UBaseType_t uxNonApplicationTasks = 1;\r
eSleepModeStatus eReturn = eStandardSleep;\r
\r
+ /* This function must be called from a critical section. */\r
+\r
if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )\r
{\r
/* A task was made ready while the scheduler was suspended. */\r
portCLEAN_UP_TCB( pxTCB );\r
\r
/* Free up the memory allocated by the scheduler for the task. It is up\r
- to the task to free any memory allocated at the application level. */\r
+ to the task to free any memory allocated at the application level.\r
+ See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html\r
+ for additional information. */\r
#if ( configUSE_NEWLIB_REENTRANT == 1 )\r
{\r
_reclaim_reent( &( pxTCB->xNewLib_reent ) );\r
}\r
\r
#endif /* configUSE_TASK_NOTIFICATIONS */\r
-\r
/*-----------------------------------------------------------*/\r
\r
#if( configUSE_TASK_NOTIFICATIONS == 1 )\r
#endif /* configUSE_TASK_NOTIFICATIONS */\r
/*-----------------------------------------------------------*/\r
\r
+#if( configUSE_TASK_NOTIFICATIONS == 1 )
+
+ /* Clear the bits specified by ulBitsToClear in the notification value of
+ the task referenced by xTask (or of the calling task if xTask is NULL),
+ and return the notification value as it was BEFORE the bits were
+ cleared. */
+ uint32_t ulTaskNotifyValueClear( TaskHandle_t xTask, uint32_t ulBitsToClear )
+ {
+ TCB_t *pxTCB;
+ uint32_t ulReturn;
+
+ /* If null is passed in here then it is the calling task that is having
+ its notification state cleared. */
+ pxTCB = prvGetTCBFromHandle( xTask );
+
+ taskENTER_CRITICAL();
+ {
+ /* Return the notification as it was before the bits were cleared,
+ then clear the bit mask.  Note the value must be read from pxTCB
+ (the target task), not pxCurrentTCB, otherwise the wrong value is
+ returned when xTask is not the calling task. */
+ ulReturn = pxTCB->ulNotifiedValue;
+ pxTCB->ulNotifiedValue &= ~ulBitsToClear;
+ }
+ taskEXIT_CRITICAL();
+
+ return ulReturn;
+ }
+
+#endif /* configUSE_TASK_NOTIFICATIONS */
+/*-----------------------------------------------------------*/\r
+\r
#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )\r
\r
uint32_t ulTaskGetIdleRunTimeCounter( void )\r