X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=FreeRTOS%2FSource%2Ftasks.c;h=e62b388602fcaec3178b6791d746295fb22f4be7;hb=392726f8e4348b719ec56ba4603a79a8f89efec5;hp=ceb863c588080c414f6c3f9b741b1c9532e4138a;hpb=bd05520bc520a20c1d69b9a1eec0e4e8e1adc014;p=freertos diff --git a/FreeRTOS/Source/tasks.c b/FreeRTOS/Source/tasks.c index ceb863c58..e62b38860 100644 --- a/FreeRTOS/Source/tasks.c +++ b/FreeRTOS/Source/tasks.c @@ -1,6 +1,6 @@ /* - * FreeRTOS Kernel V10.0.1 - * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in @@ -75,24 +75,7 @@ functions but without including stdio.h here. */ */ #define tskSTACK_FILL_BYTE ( 0xa5U ) -/* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using -dynamically allocated RAM, in which case when any task is deleted it is known -that both the task's stack and TCB need to be freed. Sometimes the -FreeRTOSConfig.h settings only allow a task to be created using statically -allocated RAM, in which case when any task is deleted it is known that neither -the task's stack or TCB should be freed. Sometimes the FreeRTOSConfig.h -settings allow a task to be created using either statically or dynamically -allocated RAM, in which case a member of the TCB is used to record whether the -stack and/or TCB were allocated statically or dynamically, so when a task is -deleted the RAM that was allocated dynamically is freed again and no attempt is -made to free the RAM that was allocated statically. -tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for a -task to be created using either statically or dynamically allocated RAM. Note -that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created with -a statically allocated stack and a dynamically allocated TCB. -!!!NOTE!!! If the definition of tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is -changed then the definition of StaticTask_t must also be updated. */ -#define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) +/* Bits used to record how a task's stack and TCB were allocated. */ #define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 ) #define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 ) #define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 ) @@ -100,7 +83,7 @@ changed then the definition of StaticTask_t must also be updated. */ /* If any of the following are set then task stacks are filled with a known value so the high water mark can be determined. If none of the following are set then don't fill the stack so there is no unnecessary dependency on memset. */ -#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 1 #else #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 0 @@ -266,7 +249,7 @@ to its original value when it is released.
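To illustrate how the three allocation values defined above are consumed, here is a simplified sketch of the clean-up decision made when a task is deleted; the helper name is hypothetical, and the real logic lives in prvDeleteTCB() later in this file.

    static void prvExampleFreeTaskMemory( TCB_t *pxTCB ) /* hypothetical helper - sketch only */
    {
        #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
        {
            if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
            {
                /* Both the stack and the TCB came from the FreeRTOS heap. */
                vPortFree( pxTCB->pxStack );
                vPortFree( pxTCB );
            }
            else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
            {
                /* The stack was supplied by the application, so only the TCB is freed. */
                vPortFree( pxTCB );
            }
            /* tskSTATICALLY_ALLOCATED_STACK_AND_TCB - nothing was dynamically allocated. */
        }
        #endif
    }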
*/ * and stores task state information, including a pointer to the task's context * (the task's run time environment, including register values) */ -typedef struct TaskControlBlock_t +typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */ { volatile StackType_t *pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */ @@ -317,7 +300,10 @@ typedef struct TaskControlBlock_t responsible for resulting newlib operation. User must be familiar with newlib and must provide system-wide implementations of the necessary stubs. Be warned that (at the time of writing) the current newlib design - implements a system-wide malloc() that must be provided with locks. */ + implements a system-wide malloc() that must be provided with locks. + + See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + for additional information. */ struct _reent xNewLib_reent; #endif @@ -326,7 +312,7 @@ typedef struct TaskControlBlock_t volatile uint8_t ucNotifyState; #endif - /* See the comments above the definition of + /* See the comments in FreeRTOS.h with the definition of tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */ #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */ @@ -336,6 +322,10 @@ typedef struct TaskControlBlock_t uint8_t ucDelayAborted; #endif + #if( configUSE_POSIX_ERRNO == 1 ) + int iTaskErrno; + #endif + } tskTCB; /* The old tskTCB name is maintained above then typedefed to the new TCB_t name @@ -346,8 +336,13 @@ typedef tskTCB TCB_t; which static variables must be declared volatile. */ PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; -/* Lists for ready and blocked tasks. --------------------*/ +/* Lists for ready and blocked tasks. -------------------- +xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but +doing so breaks some kernel aware debuggers and debuggers that rely on removing +the static qualifier. */ PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];/*< Prioritised ready tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count). */ PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */ PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */ @@ -365,12 +360,18 @@ PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been r #endif +/* Global POSIX errno. Its value is changed upon context switching to match +the errno of the currently running task. */ +#if ( configUSE_POSIX_ERRNO == 1 ) + int FreeRTOS_errno = 0; +#endif + /* Other file private variables.
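A minimal usage sketch of the per-task errno support introduced above, assuming configUSE_POSIX_ERRNO is set to 1 in FreeRTOSConfig.h; prvReadSensor() is a made-up call used only for illustration.

    #include "FreeRTOS.h"
    #include "task.h"

    extern int prvReadSensor( void ); /* hypothetical driver function that sets FreeRTOS_errno */

    void vErrnoAwareTask( void *pvParameters )
    {
        ( void ) pvParameters;

        for( ;; )
        {
            if( prvReadSensor() < 0 )
            {
                /* FreeRTOS_errno is saved to and restored from the TCB's iTaskErrno
                member on each context switch, so each task only ever sees its own
                last error value. */
                int iLastError = FreeRTOS_errno;
                ( void ) iLastError;
            }
        }
    }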
--------------------------------*/ PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U; PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY; PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE; -PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U; +PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U; PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE; PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; @@ -387,6 +388,15 @@ when the scheduler is unsuspended. The pending ready list itself can only be accessed from a critical section. */ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE; +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + + /* Do not move these variables to function scope as doing so prevents the + code working with debuggers that need to remove the static qualifier. */ + PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */ + PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */ + +#endif + /*lint -restore */ /*-----------------------------------------------------------*/ @@ -497,7 +507,7 @@ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseT * This function determines the 'high water mark' of the task stack by * determining how much of the stack remains at the original preset value. */ -#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) +#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION; @@ -607,7 +617,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION; task was created statically in case the task is later deleted. */ pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; } - #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL ); prvAddNewTaskToReadyList( pxNewTCB ); @@ -649,7 +659,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION; task was created statically in case the task is later deleted. */ pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; } - #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, pxTaskDefinition->pcName, @@ -690,14 +700,14 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION; /* Store the stack location in the TCB. */ pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; - #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) { /* Tasks can be created statically or dynamically, so note this task had a statically allocated stack in case it is later deleted. The TCB was allocated dynamically. 
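As a usage sketch of the static creation path marked above (assuming configSUPPORT_STATIC_ALLOCATION is 1): both buffers are owned by the application, which is why the kernel records tskSTATICALLY_ALLOCATED_STACK_AND_TCB and never attempts to free them. The task function is illustrative.

    extern void vExampleTaskCode( void *pvParameters ); /* illustrative task function */

    static StaticTask_t xTaskBuffer;
    static StackType_t uxTaskStack[ configMINIMAL_STACK_SIZE ];

    void vCreateStaticallyAllocatedTask( void )
    {
        ( void ) xTaskCreateStatic( vExampleTaskCode,
                                    "STATIC",                 /* Task name. */
                                    configMINIMAL_STACK_SIZE, /* Stack depth in words. */
                                    NULL,                     /* No parameter. */
                                    tskIDLE_PRIORITY + 1,
                                    uxTaskStack,
                                    &xTaskBuffer );
    }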
*/ pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY; } - #endif + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, pxTaskDefinition->pcName, @@ -794,7 +804,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION; task was created dynamically in case it is later deleted. */ pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB; } - #endif /* configSUPPORT_STATIC_ALLOCATION */ + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL ); prvAddNewTaskToReadyList( pxNewTCB ); @@ -837,8 +847,6 @@ UBaseType_t x; uxPriority &= ~portPRIVILEGE_BIT; #endif /* portUSING_MPU_WRAPPERS == 1 */ - configASSERT( pcName ); - /* Avoid dependency on memset() if it is not required. */ #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 ) { @@ -881,26 +889,35 @@ UBaseType_t x; #endif /* portSTACK_GROWTH */ /* Store the task name in the TCB. */ - for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + if( pcName != NULL ) { - pxNewTCB->pcTaskName[ x ] = pcName[ x ]; - - /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than - configMAX_TASK_NAME_LEN characters just in case the memory after the - string is not accessible (extremely unlikely). */ - if( pcName[ x ] == ( char ) 0x00 ) - { - break; - } - else + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) { - mtCOVERAGE_TEST_MARKER(); + pxNewTCB->pcTaskName[ x ] = pcName[ x ]; + + /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than + configMAX_TASK_NAME_LEN characters just in case the memory after the + string is not accessible (extremely unlikely). */ + if( pcName[ x ] == ( char ) 0x00 ) + { + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - } - /* Ensure the name string is terminated in the case that the string length - was greater or equal to configMAX_TASK_NAME_LEN. */ - pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0'; + /* Ensure the name string is terminated in the case that the string length + was greater or equal to configMAX_TASK_NAME_LEN. */ + pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0'; + } + else + { + /* The task has not been given a name, so just ensure there is a NULL + terminator when it is read out. */ + pxNewTCB->pcTaskName[ 0 ] = 0x00; + } /* This is used as an array index so must ensure it's not too large. First remove the privilege bit if one is present. */ @@ -979,7 +996,9 @@ UBaseType_t x; #if ( configUSE_NEWLIB_REENTRANT == 1 ) { - /* Initialise this task's Newlib reent structure. */ + /* Initialise this task's Newlib reent structure. + See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + for additional information. */ _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) ); } #endif @@ -996,11 +1015,49 @@ UBaseType_t x; the top of stack variable is updated. */ #if( portUSING_MPU_WRAPPERS == 1 ) { - pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + /* If the port has capability to detect stack overflow, + pass the stack end address to the stack initialization + function as well. 
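Because the configASSERT() on pcName has been removed and the copy loop above now checks for NULL, a task can be created without a name. A short sketch, assuming configSUPPORT_DYNAMIC_ALLOCATION is 1 and reusing the illustrative vExampleTaskCode from earlier:

    void vCreateAnonymousTask( void )
    {
        TaskHandle_t xHandle = NULL;

        /* Passing NULL as the name is now tolerated - the TCB simply stores an
        empty, NULL terminated string. */
        if( xTaskCreate( vExampleTaskCode, NULL, configMINIMAL_STACK_SIZE, NULL,
                         tskIDLE_PRIORITY + 1, &xHandle ) != pdPASS )
        {
            /* There was not enough FreeRTOS heap to create the task. */
        }
    }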
*/ + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + { + #if( portSTACK_GROWTH < 0 ) + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #else /* portSTACK_GROWTH */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #endif /* portSTACK_GROWTH */ + } + #else /* portHAS_STACK_OVERFLOW_CHECKING */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #endif /* portHAS_STACK_OVERFLOW_CHECKING */ } #else /* portUSING_MPU_WRAPPERS */ { - pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters ); + /* If the port has capability to detect stack overflow, + pass the stack end address to the stack initialization + function as well. */ + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + { + #if( portSTACK_GROWTH < 0 ) + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters ); + } + #else /* portSTACK_GROWTH */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters ); + } + #endif /* portSTACK_GROWTH */ + } + #else /* portHAS_STACK_OVERFLOW_CHECKING */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters ); + } + #endif /* portHAS_STACK_OVERFLOW_CHECKING */ } #endif /* portUSING_MPU_WRAPPERS */ @@ -1112,7 +1169,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) being deleted. */ pxTCB = prvGetTCBFromHandle( xTaskToDelete ); - /* Remove task from the ready list. */ + /* Remove task from the ready/delayed list. */ if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) { taskRESET_READY_PRIORITY( pxTCB->uxPriority ); @@ -1152,6 +1209,10 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) check the xTasksWaitingTermination list. */ ++uxDeletedTasksWaitingCleanUp; + /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as + portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */ + traceTASK_DELETE( pxTCB ); + /* The pre-delete hook is primarily for the Windows simulator, in which Windows specific clean up operations are performed, after which it is not possible to yield away from this task - @@ -1162,14 +1223,13 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) else { --uxCurrentNumberOfTasks; + traceTASK_DELETE( pxTCB ); prvDeleteTCB( pxTCB ); /* Reset the next expected unblock time in case it referred to the task that has just been deleted. */ prvResetNextTaskUnblockTime(); } - - traceTASK_DELETE( pxTCB ); } taskEXIT_CRITICAL(); @@ -1321,7 +1381,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) #endif /* INCLUDE_vTaskDelay */ /*-----------------------------------------------------------*/ -#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) ) +#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) ) eTaskState eTaskGetState( TaskHandle_t xTask ) { @@ -1989,7 +2049,9 @@ BaseType_t xReturn; #if ( configUSE_NEWLIB_REENTRANT == 1 ) { /* Switch Newlib's _impure_ptr variable to point to the _reent - structure specific to the task that will run first. */ + structure specific to the task that will run first. + See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + for additional information. 
*/ _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); } #endif /* configUSE_NEWLIB_REENTRANT */ @@ -2052,6 +2114,7 @@ void vTaskSuspendAll( void ) post in the FreeRTOS support forum before reporting this as a bug! - http://goo.gl/wu4acr */ ++uxSchedulerSuspended; + portMEMORY_BARRIER(); } /*----------------------------------------------------------*/ @@ -2122,6 +2185,7 @@ BaseType_t xTaskResumeAll( void ) { TCB_t *pxTCB = NULL; BaseType_t xAlreadyYielded = pdFALSE; +TickType_t xTicksToNextUnblockTime; /* If uxSchedulerSuspended is zero then this function does not match a previous call to vTaskSuspendAll(). */ @@ -2176,30 +2240,51 @@ BaseType_t xAlreadyYielded = pdFALSE; they should be processed now. This ensures the tick count does not slip, and that any delayed tasks are resumed at the correct time. */ + while( xPendedTicks > ( TickType_t ) 0 ) { - UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. */ + /* Calculate how far into the future the next task will + leave the Blocked state because its timeout expired. If + there are no tasks due to leave the blocked state between + the time now and the time at which the tick count overflows + then xNextTaskUnblockTime will be the tick overflow time. + This means xNextTaskUnblockTime can never be less than + xTickCount, and the following can therefore not + underflow. */ + configASSERT( xNextTaskUnblockTime >= xTickCount ); + xTicksToNextUnblockTime = xNextTaskUnblockTime - xTickCount; - if( uxPendedCounts > ( UBaseType_t ) 0U ) + /* Don't want to move the tick count more than the number + of ticks that are pending, so cap if necessary. */ + if( xTicksToNextUnblockTime > xPendedTicks ) { - do - { - if( xTaskIncrementTick() != pdFALSE ) - { - xYieldPending = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - --uxPendedCounts; - } while( uxPendedCounts > ( UBaseType_t ) 0U ); + xTicksToNextUnblockTime = xPendedTicks; + } - uxPendedTicks = 0; + if( xTicksToNextUnblockTime == 0 ) + { + /* xTicksToNextUnblockTime could be zero if the tick + count is about to overflow and xTicksToNextUnblockTime + holds the time at which the tick count will overflow + (rather than the time at which the next task will + unblock). Set to 1 otherwise xPendedTicks won't be + decremented below. */ + xTicksToNextUnblockTime = ( TickType_t ) 1; } - else + else if( xTicksToNextUnblockTime > ( TickType_t ) 1 ) { - mtCOVERAGE_TEST_MARKER(); + /* Move the tick count one short of the next unblock + time, then call xTaskIncrementTick() to move the tick + count up to the next unblock time to unblock the task, + if any. This will also swap the blocked task and + overflow blocked task lists if necessary. */ + xTickCount += ( xTicksToNextUnblockTime - ( TickType_t ) 1 ); } + xYieldPending |= xTaskIncrementTick(); + + /* Adjust for the number of ticks just added to + xTickCount and go around the loop again if + xPendedTicks is still greater than 0. */ + xPendedTicks -= xTicksToNextUnblockTime; } if( xYieldPending != pdFALSE ) @@ -2533,6 +2618,109 @@ implementations require configUSE_TICKLESS_IDLE to be set to a value other than #endif /* configUSE_TICKLESS_IDLE */ /*----------------------------------------------------------*/ +BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) +{ +BaseType_t xYieldRequired = pdFALSE; + + /* Must not be called with the scheduler suspended as the implementation + relies on xPendedTicks being wound down to 0 in xTaskResumeAll().
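For context, the application-level pattern whose resume path runs the pended-tick loop above is sketched here; the body of the protected region is a placeholder.

    void vExampleSchedulerLockedRegion( void )
    {
        vTaskSuspendAll();
        {
            /* Code here cannot be switched out, but interrupts stay enabled, so any
            tick interrupts that fire are simply accumulated in xPendedTicks. */
        }
        ( void ) xTaskResumeAll(); /* Unwinds xPendedTicks, yielding if a higher priority task was unblocked. */
    }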
*/ + configASSERT( uxSchedulerSuspended == 0 ); + + /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when + the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */ + vTaskSuspendAll(); + xPendedTicks += xTicksToCatchUp; + xYieldRequired = xTaskResumeAll(); + + return xYieldRequired; +} +/*----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + + BaseType_t xTaskAbortDelayFromISR( TaskHandle_t xTask, BaseType_t * const pxHigherPriorityTaskWoken ) + { + TCB_t *pxTCB = xTask; + BaseType_t xReturn; + UBaseType_t uxSavedInterruptStatus; + + configASSERT( pxTCB ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* A task can only be prematurely removed from the Blocked state if + it is actually in the Blocked state. */ + if( eTaskGetState( xTask ) == eBlocked ) + { + xReturn = pdPASS; + + /* Remove the reference to the task from the blocked list. A higher + priority interrupt won't touch the xStateListItem because of the + critical section. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + + /* Is the task waiting on an event also? If so remove it from + the event list too. */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + + /* This lets the task know it was forcibly removed from the + blocked state so it should not re-evaluate its block time and + then block again. */ + pxTCB->ucDelayAborted = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Place the unblocked task into the appropriate ready list. */ + prvAddTaskToReadyList( pxTCB ); + + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + if( pxHigherPriorityTaskWoken != NULL ) + { + /* Pend the yield to be performed when the scheduler + is unsuspended. 
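A hedged sketch of how xTaskCatchUpTicks(), shown above, might be used by an application that stops the tick interrupt during a long low-power sleep; the sleep-time measurement is a placeholder.

    extern uint32_t ulGetElapsedSleepTimeMs( void ); /* hypothetical RTC/timer read */

    void vOnWakeFromDeepSleep( void )
    {
        /* Tell the kernel how much time passed while the tick was stopped so task
        timeouts remain correct. A pdTRUE return means winding the count forward
        unblocked a higher priority task and a context switch already occurred. */
        ( void ) xTaskCatchUpTicks( pdMS_TO_TICKS( ulGetElapsedSleepTimeMs() ) );
    }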
*/ + *pxHigherPriorityTaskWoken = pdTRUE; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xReturn = pdFAIL; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; + } + +#endif +/*----------------------------------------------------------*/ + #if ( INCLUDE_xTaskAbortDelay == 1 ) BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) @@ -2564,6 +2752,10 @@ implementations require configUSE_TICKLESS_IDLE to be set to a value other than if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) { ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + + /* This lets the task know it was forcibly removed from the + blocked state so it should not re-evaluate its block time and + then block again. */ pxTCB->ucDelayAborted = pdTRUE; } else @@ -2740,7 +2932,7 @@ BaseType_t xSwitchRequired = pdFALSE; { /* Guard against the tick hook being called when the pended tick count is being unwound (when the scheduler is being unlocked). */ - if( uxPendedTicks == ( UBaseType_t ) 0U ) + if( xPendedTicks == ( TickType_t ) 0 ) { vApplicationTickHook(); } @@ -2750,10 +2942,23 @@ BaseType_t xSwitchRequired = pdFALSE; } } #endif /* configUSE_TICK_HOOK */ + + #if ( configUSE_PREEMPTION == 1 ) + { + if( xYieldPending != pdFALSE ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_PREEMPTION */ } else { - ++uxPendedTicks; + ++xPendedTicks; /* The tick hook gets called at regular intervals, even if the scheduler is locked. */ @@ -2764,19 +2969,6 @@ BaseType_t xSwitchRequired = pdFALSE; #endif } - #if ( configUSE_PREEMPTION == 1 ) - { - if( xYieldPending != pdFALSE ) - { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* configUSE_PREEMPTION */ - return xSwitchRequired; } /*-----------------------------------------------------------*/ @@ -2814,24 +3006,17 @@ BaseType_t xSwitchRequired = pdFALSE; TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask ) { - TCB_t *xTCB; + TCB_t *pxTCB; TaskHookFunction_t xReturn; - /* If xTask is NULL then we are setting our own task hook. */ - if( xTask == NULL ) - { - xTCB = ( TCB_t * ) pxCurrentTCB; - } - else - { - xTCB = xTask; - } + /* If xTask is NULL then set the calling task's hook. */ + pxTCB = prvGetTCBFromHandle( xTask ); /* Save the hook function in the TCB. A critical section is required as the value can be accessed from an interrupt. */ taskENTER_CRITICAL(); { - xReturn = xTCB->pxTaskTag; + xReturn = pxTCB->pxTaskTag; } taskEXIT_CRITICAL(); @@ -2841,6 +3026,31 @@ BaseType_t xSwitchRequired = pdFALSE; #endif /* configUSE_APPLICATION_TASK_TAG */ /*-----------------------------------------------------------*/ +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + TaskHookFunction_t xReturn; + UBaseType_t uxSavedInterruptStatus; + + /* If xTask is NULL then set the calling task's hook. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + /* Save the hook function in the TCB. A critical section is required as + the value can be accessed from an interrupt. 
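A sketch of calling the new xTaskAbortDelayFromISR() from an interrupt handler; the handle and ISR names are illustrative, and portYIELD_FROM_ISR() is the usual port-specific way to request the context switch on exit.

    extern TaskHandle_t xBlockedTaskHandle; /* hypothetical handle saved at creation time */

    void vExampleInterruptHandler( void )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        /* Force the task out of the Blocked state before its timeout expires. */
        if( xTaskAbortDelayFromISR( xBlockedTaskHandle, &xHigherPriorityTaskWoken ) == pdPASS )
        {
            /* The task was blocked and has now been moved to a ready list. */
        }

        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }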
*/ + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + xReturn = pxTCB->pxTaskTag; + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter ) @@ -2888,46 +3098,59 @@ void vTaskSwitchContext( void ) #if ( configGENERATE_RUN_TIME_STATS == 1 ) { - PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */ - PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */ - - #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE - portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); - #else - ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); - #endif + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + #else + ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif - /* Add the amount of time the task has been running to the - accumulated time so far. The time the task started running was - stored in ulTaskSwitchedInTime. Note that there is no overflow - protection here so count values are only valid until the timer - overflows. The guard against negative values is to protect - against suspect run time stat counter implementations - which - are provided by the application, not the kernel. */ - if( ulTotalRunTime > ulTaskSwitchedInTime ) - { - pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - ulTaskSwitchedInTime = ulTotalRunTime; + /* Add the amount of time the task has been running to the + accumulated time so far. The time the task started running was + stored in ulTaskSwitchedInTime. Note that there is no overflow + protection here so count values are only valid until the timer + overflows. The guard against negative values is to protect + against suspect run time stat counter implementations - which + are provided by the application, not the kernel. */ + if( ulTotalRunTime > ulTaskSwitchedInTime ) + { + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + ulTaskSwitchedInTime = ulTotalRunTime; } #endif /* configGENERATE_RUN_TIME_STATS */ /* Check for stack overflow, if configured. */ taskCHECK_FOR_STACK_OVERFLOW(); + /* Before the currently running task is switched out, save its errno. */ + #if( configUSE_POSIX_ERRNO == 1 ) + { + pxCurrentTCB->iTaskErrno = FreeRTOS_errno; + } + #endif + /* Select a new task to run using either the generic C or port optimised asm code. */ taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ traceTASK_SWITCHED_IN(); + /* After the new task is switched in, update the global errno. */ + #if( configUSE_POSIX_ERRNO == 1 ) + { + FreeRTOS_errno = pxCurrentTCB->iTaskErrno; + } + #endif + #if ( configUSE_NEWLIB_REENTRANT == 1 ) { /* Switch Newlib's _impure_ptr variable to point to the _reent - structure specific to this task. */ + structure specific to this task. + See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + for additional information. 
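For completeness, a brief sketch pairing vTaskSetApplicationTaskTag() with the interrupt-safe getter added above; configUSE_APPLICATION_TASK_TAG must be 1, and the callback, handle and handler names are illustrative.

    static BaseType_t prvExampleTagCallback( void *pvParameter )
    {
        ( void ) pvParameter;
        return 0;
    }

    void vAttachTagToSelf( void )
    {
        vTaskSetApplicationTaskTag( NULL, prvExampleTagCallback ); /* NULL means the calling task. */
    }

    void vExampleTagReadingISR( void )
    {
        extern TaskHandle_t xSomeTaskHandle; /* hypothetical */
        TaskHookFunction_t xTag;

        xTag = xTaskGetApplicationTaskTagFromISR( xSomeTaskHandle );

        if( xTag != NULL )
        {
            ( void ) xTag( NULL );
        }
    }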
*/ _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); } #endif /* configUSE_NEWLIB_REENTRANT */ @@ -3035,6 +3258,20 @@ BaseType_t xReturn; { ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) ); prvAddTaskToReadyList( pxUnblockedTCB ); + + #if( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked on a kernel object then xNextTaskUnblockTime + might be set to the blocked task's time out time. If the task is + unblocked for a reason other than a timeout xNextTaskUnblockTime is + normally left unchanged, because it is automatically reset to a new + value when the tick count equals xNextTaskUnblockTime. However if + tickless idling is used it might be more important to enter sleep mode + at the earliest possible time - so reset xNextTaskUnblockTime here to + ensure it is updated at the earliest possible time. */ + prvResetNextTaskUnblockTime(); + } + #endif } else { @@ -3059,20 +3296,6 @@ BaseType_t xReturn; xReturn = pdFALSE; } - #if( configUSE_TICKLESS_IDLE != 0 ) - { - /* If a task is blocked on a kernel object then xNextTaskUnblockTime - might be set to the blocked task's time out time. If the task is - unblocked for a reason other than a timeout xNextTaskUnblockTime is - normally left unchanged, because it is automatically reset to a new - value when the tick count equals xNextTaskUnblockTime. However if - tickless idling is used it might be more important to enter sleep mode - at the earliest possible time - so reset xNextTaskUnblockTime here to - ensure it is updated at the earliest possible time. */ - prvResetNextTaskUnblockTime(); - } - #endif - return xReturn; } /*-----------------------------------------------------------*/ @@ -3094,6 +3317,20 @@ TCB_t *pxUnblockedTCB; configASSERT( pxUnblockedTCB ); ( void ) uxListRemove( pxEventListItem ); + #if( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked on a kernel object then xNextTaskUnblockTime + might be set to the blocked task's time out time. If the task is + unblocked for a reason other than a timeout xNextTaskUnblockTime is + normally left unchanged, because it is automatically reset to a new + value when the tick count equals xNextTaskUnblockTime. However if + tickless idling is used it might be more important to enter sleep mode + at the earliest possible time - so reset xNextTaskUnblockTime here to + ensure it is updated at the earliest possible time. */ + prvResetNextTaskUnblockTime(); + } + #endif + /* Remove the task from the delayed list and add it to the ready list. The scheduler is suspended so interrupts will not be accessing the ready lists. */ @@ -3260,7 +3497,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) /* In case a task that has a secure context deletes itself, in which case the idle task is responsible for deleting the task's secure context, if any. */ - portTASK_CALLS_SECURE_FUNCTIONS(); + portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE ); for( ;; ) { @@ -3374,6 +3611,8 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) const UBaseType_t uxNonApplicationTasks = 1; eSleepModeStatus eReturn = eStandardSleep; + /* This function must be called from a critical section. */ + if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 ) { /* A task was made ready while the scheduler was suspended. */ @@ -3464,8 +3703,6 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) static void prvInitialiseTaskLists( void ) { UBaseType_t uxPriority; -PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. 
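As background for the sleep-mode confirmation above, an outline (heavily simplified, with placeholder hardware calls) of how a tickless-idle port's vPortSuppressTicksAndSleep() typically uses eTaskConfirmSleepModeStatus() and vTaskStepTick():

    extern void prvSleepUntilInterruptOrTimeout( TickType_t xExpectedIdleTime ); /* placeholder */
    extern TickType_t prvTicksActuallySlept( void );                             /* placeholder */

    void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
    {
        portDISABLE_INTERRUPTS();

        if( eTaskConfirmSleepModeStatus() == eAbortSleep )
        {
            /* Something became ready after the idle task decided to sleep, so do
            not enter the low power state after all. */
            portENABLE_INTERRUPTS();
        }
        else
        {
            prvSleepUntilInterruptOrTimeout( xExpectedIdleTime );

            /* Wind the tick count forward by the time actually spent asleep
            before restarting the tick interrupt. */
            vTaskStepTick( prvTicksActuallySlept() );
            portENABLE_INTERRUPTS();
        }
    }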
*/ -PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */ for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ ) { @@ -3653,7 +3890,7 @@ static void prvCheckTasksWaitingTermination( void ) #endif /* configUSE_TRACE_FACILITY */ /*-----------------------------------------------------------*/ -#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) +#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) { @@ -3670,7 +3907,47 @@ static void prvCheckTasksWaitingTermination( void ) return ( configSTACK_DEPTH_TYPE ) ulCount; } -#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) */ +#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + + /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the + same except for their return type. Using configSTACK_DEPTH_TYPE allows the + user to determine the return type. It gets around the problem of the value + overflowing on 8-bit types without breaking backward compatibility for + applications that expect an 8-bit return type. */ + configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + uint8_t *pucEndOfStack; + configSTACK_DEPTH_TYPE uxReturn; + + /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are + the same except for their return type. Using configSTACK_DEPTH_TYPE + allows the user to determine the return type. It gets around the + problem of the value overflowing on 8-bit types without breaking + backward compatibility for applications that expect an 8-bit return + type. */ + + pxTCB = prvGetTCBFromHandle( xTask ); + + #if portSTACK_GROWTH < 0 + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxStack; + } + #else + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack; + } + #endif + + uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */ /*-----------------------------------------------------------*/ #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) @@ -3711,7 +3988,9 @@ static void prvCheckTasksWaitingTermination( void ) portCLEAN_UP_TCB( pxTCB ); /* Free up the memory allocated by the scheduler for the task. It is up - to the task to free any memory allocated at the application level. */ + to the task to free any memory allocated at the application level. + See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + for additional information. */ #if ( configUSE_NEWLIB_REENTRANT == 1 ) { _reclaim_reent( &( pxTCB->xNewLib_reent ) ); @@ -3861,7 +4140,10 @@ TCB_t *pxTCB; { if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) { - taskRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority ); + /* It is known that the task is in its ready list so + there is no need to check again and the port level + reset macro can be called directly. 
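A short usage sketch for uxTaskGetStackHighWaterMark2() as added above; INCLUDE_uxTaskGetStackHighWaterMark2 must be 1, and configSTACK_DEPTH_TYPE can be widened (for example to uint32_t) in FreeRTOSConfig.h so the value cannot overflow an 8-bit type.

    void vCheckOwnStackUsage( void )
    {
        configSTACK_DEPTH_TYPE uxMinimumEverFree;

        /* NULL means the calling task. The value is the smallest amount of stack
        space, in words, that has ever been free, so a value near zero means the
        task has nearly overflowed its stack at some point. */
        uxMinimumEverFree = uxTaskGetStackHighWaterMark2( NULL );

        if( uxMinimumEverFree < ( configSTACK_DEPTH_TYPE ) 20 )
        {
            /* Consider increasing the stack allocated to this task. */
        }
    }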
*/ portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority ); } else { @@ -3941,7 +4223,7 @@ TCB_t *pxTCB; the mutex. If the mutex is held by a task then it cannot be given from an interrupt, and if a mutex is given by the holding task then it must be the running state task. Remove - the holding task from the ready list. */ + the holding task from the ready/delayed list. */ if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) { taskRESET_READY_PRIORITY( pxTCB->uxPriority ); @@ -4062,7 +4344,10 @@ TCB_t *pxTCB; { if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) { - taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + /* It is known that the task is in its ready list so + there is no need to check again and the port level + reset macro can be called directly. */ + portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); } else { @@ -4916,7 +5201,6 @@ TickType_t uxReturn; } #endif /* configUSE_TASK_NOTIFICATIONS */ - /*-----------------------------------------------------------*/ #if( configUSE_TASK_NOTIFICATIONS == 1 ) @@ -4950,6 +5234,41 @@ TickType_t uxReturn; #endif /* configUSE_TASK_NOTIFICATIONS */ /*-----------------------------------------------------------*/ +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + uint32_t ulTaskNotifyValueClear( TaskHandle_t xTask, uint32_t ulBitsToClear ) + { + TCB_t *pxTCB; + uint32_t ulReturn; + + /* If null is passed in here then it is the calling task that is having + its notification state cleared. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + taskENTER_CRITICAL(); + { + /* Return the notification as it was before the bits were cleared, + then clear the bit mask. */ + ulReturn = pxTCB->ulNotifiedValue; + pxTCB->ulNotifiedValue &= ~ulBitsToClear; + } + taskEXIT_CRITICAL(); + + return ulReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + + uint32_t ulTaskGetIdleRunTimeCounter( void ) + { + return xIdleTaskHandle->ulRunTimeCounter; + } + +#endif +/*----------------------------------------------------------*/ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) {
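Finally, usage sketches for the two functions added at the end of this hunk; they assume configUSE_TASK_NOTIFICATIONS, configGENERATE_RUN_TIME_STATS and INCLUDE_xTaskGetIdleTaskHandle are enabled, and the percentage calculation is only a rough illustration.

    void vClearNotificationBitsExample( void )
    {
        uint32_t ulPreviousValue;

        /* Clear bits 0 and 1 of the calling task's notification value, returning
        the value as it was before the bits were cleared. */
        ulPreviousValue = ulTaskNotifyValueClear( NULL, 0x03UL );
        ( void ) ulPreviousValue;
    }

    uint32_t ulApproximateIdlePercentage( void )
    {
        /* portGET_RUN_TIME_COUNTER_VALUE() is the application supplied run time
        counter used for run time stats. */
        uint32_t ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE();

        if( ulTotalTime == 0UL )
        {
            return 0UL;
        }

        /* The multiplication can overflow on long runs - sketch only. */
        return ( ulTaskGetIdleRunTimeCounter() * 100UL ) / ulTotalTime;
    }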