/*\r
- FreeRTOS V7.6.0 - Copyright (C) 2013 Real Time Engineers Ltd.\r
+ FreeRTOS V8.1.2 - Copyright (C) 2014 Real Time Engineers Ltd.\r
All rights reserved\r
\r
VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.\r
the terms of the GNU General Public License (version 2) as published by the\r
Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.\r
\r
- >>! NOTE: The modification to the GPL is included to allow you to distribute\r
- >>! a combined work that includes FreeRTOS without being obliged to provide\r
- >>! the source code for proprietary components outside of the FreeRTOS\r
- >>! kernel.\r
+ >>! NOTE: The modification to the GPL is included to allow you to !<<\r
+ >>! distribute a combined work that includes FreeRTOS without being !<<\r
+ >>! obliged to provide the source code for proprietary components !<<\r
+ >>! outside of the FreeRTOS kernel. !<<\r
\r
FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY\r
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\r
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */\r
\r
\r
-/* Constants used with the cRxLock and xTxLock structure members. */\r
+/* Constants used with the xRxLock and xTxLock structure members. */\r
#define queueUNLOCKED ( ( BaseType_t ) -1 )\r
#define queueLOCKED_UNMODIFIED ( ( BaseType_t ) 0 )\r
\r
\r
/*\r
* Definition of the queue used by the scheduler.\r
- * Items are queued by copy, not reference.\r
+ * Items are queued by copy, not reference. See the following link for the\r
+ * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html\r
*/\r
typedef struct QueueDefinition\r
{\r
struct QueueDefinition *pxQueueSetContainer;\r
#endif\r
\r
-} Queue_t;\r
+} xQUEUE;\r
+\r
+/* The old xQUEUE name is maintained above then typedefed to the new Queue_t\r
+name below to enable the use of older kernel aware debuggers. */\r
+typedef xQUEUE Queue_t;\r
+\r
/*-----------------------------------------------------------*/\r
\r
/*\r
more user friendly. */\r
typedef struct QUEUE_REGISTRY_ITEM\r
{\r
- char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
+ const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
QueueHandle_t xHandle;\r
- } QueueRegistryItem_t;\r
+ } xQueueRegistryItem;\r
+\r
+ /* The old xQueueRegistryItem name is maintained above then typedefed to the
+ new QueueRegistryItem_t name below to enable the use of older kernel aware
+ debuggers. */
+ typedef xQueueRegistryItem QueueRegistryItem_t;\r
\r
/* The queue registry is simply an array of QueueRegistryItem_t structures.\r
The pcQueueName member of a structure being NULL is indicative of the\r
* Copies an item into the queue, either at the front of the queue or the\r
* back of the queue.\r
*/\r
-static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;\r
+static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;\r
\r
/*\r
* Copies an item out of a queue.\r
Queue_t *pxNewQueue;\r
size_t xQueueSizeInBytes;\r
QueueHandle_t xReturn = NULL;\r
+int8_t *pcAllocatedBuffer;\r
\r
/* Remove compiler warnings about unused parameters should\r
configUSE_TRACE_FACILITY not be set to 1. */\r
( void ) ucQueueType;\r
\r
- /* Allocate the new queue structure. */\r
- if( uxQueueLength > ( UBaseType_t ) 0 )\r
- {\r
- pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );\r
- if( pxNewQueue != NULL )\r
- {\r
- /* Create the list of pointers to queue items. The queue is one byte\r
- longer than asked for to make wrap checking easier/faster. */\r
- xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
+ configASSERT( uxQueueLength > ( UBaseType_t ) 0 );\r
\r
- pxNewQueue->pcHead = ( int8_t * ) pvPortMalloc( xQueueSizeInBytes );\r
- if( pxNewQueue->pcHead != NULL )\r
- {\r
- /* Initialise the queue members as described above where the\r
- queue type is defined. */\r
- pxNewQueue->uxLength = uxQueueLength;\r
- pxNewQueue->uxItemSize = uxItemSize;\r
- ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );\r
+ if( uxItemSize == ( UBaseType_t ) 0 )\r
+ {\r
+ /* There is not going to be a queue storage area. */\r
+ xQueueSizeInBytes = ( size_t ) 0;\r
+ }\r
+ else\r
+ {\r
+ /* The queue is one byte longer than asked for to make wrap checking\r
+ easier/faster. */\r
+ xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
+ }\r
\r
- #if ( configUSE_TRACE_FACILITY == 1 )\r
- {\r
- pxNewQueue->ucQueueType = ucQueueType;\r
- }\r
- #endif /* configUSE_TRACE_FACILITY */\r
+ /* Allocate the new queue structure and storage area. */\r
+ pcAllocatedBuffer = ( int8_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );\r
\r
- #if( configUSE_QUEUE_SETS == 1 )\r
- {\r
- pxNewQueue->pxQueueSetContainer = NULL;\r
- }\r
- #endif /* configUSE_QUEUE_SETS */\r
+ if( pcAllocatedBuffer != NULL )\r
+ {\r
+ pxNewQueue = ( Queue_t * ) pcAllocatedBuffer; /*lint !e826 MISRA The buffer cannot be too small because it was dimensioned by sizeof( Queue_t ) + xQueueSizeInBytes. */
\r
- traceQUEUE_CREATE( pxNewQueue );\r
- xReturn = pxNewQueue;\r
- }\r
- else\r
- {\r
- traceQUEUE_CREATE_FAILED( ucQueueType );\r
- vPortFree( pxNewQueue );\r
- }\r
+ if( uxItemSize == ( UBaseType_t ) 0 )\r
+ {\r
+ /* No RAM was allocated for the queue storage area, but pcHead
+ cannot be set to NULL because NULL is used as a key to say the queue\r
+ is used as a mutex. Therefore just set pcHead to point to the queue\r
+ as a benign value that is known to be within the memory map. */\r
+ pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;\r
}\r
else\r
{\r
- mtCOVERAGE_TEST_MARKER();\r
+ /* Jump past the queue structure to find the location of the queue\r
+ storage area - adding the padding bytes to get a better alignment. */\r
+ pxNewQueue->pcHead = pcAllocatedBuffer + sizeof( Queue_t );\r
}\r
+\r
+ /* Initialise the queue members as described above where the queue type\r
+ is defined. */\r
+ pxNewQueue->uxLength = uxQueueLength;\r
+ pxNewQueue->uxItemSize = uxItemSize;\r
+ ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );\r
+\r
+ #if ( configUSE_TRACE_FACILITY == 1 )\r
+ {\r
+ pxNewQueue->ucQueueType = ucQueueType;\r
+ }\r
+ #endif /* configUSE_TRACE_FACILITY */\r
+\r
+ #if( configUSE_QUEUE_SETS == 1 )\r
+ {\r
+ pxNewQueue->pxQueueSetContainer = NULL;\r
+ }\r
+ #endif /* configUSE_QUEUE_SETS */\r
+\r
+ traceQUEUE_CREATE( pxNewQueue );\r
+ xReturn = pxNewQueue;\r
}\r
else\r
{\r
taskEXIT_CRITICAL();\r
\r
return pxReturn;\r
- }\r
+ } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */\r
\r
#endif\r
/*-----------------------------------------------------------*/\r
}\r
else\r
{\r
- /* We cannot give the mutex because we are not the holder. */\r
+ /* The mutex cannot be given because the calling task is not the\r
+ holder. */\r
xReturn = pdFAIL;\r
\r
traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );\r
\r
#if ( configUSE_RECURSIVE_MUTEXES == 1 )\r
\r
- BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xBlockTime )\r
+ BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )\r
{\r
BaseType_t xReturn;\r
Queue_t * const pxMutex = ( Queue_t * ) xMutex;\r
}\r
else\r
{\r
- xReturn = xQueueGenericReceive( pxMutex, NULL, xBlockTime, pdFALSE );\r
+ xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );\r
\r
- /* pdPASS will only be returned if we successfully obtained the mutex,\r
- we may have blocked to reach here. */\r
+ /* pdPASS will only be returned if the mutex was successfully\r
+ obtained. The calling task may have entered the Blocked state\r
+ before reaching here. */\r
if( xReturn == pdPASS )\r
{\r
( pxMutex->u.uxRecursiveCallCount )++;\r
\r
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )\r
{\r
-BaseType_t xEntryTimeSet = pdFALSE;\r
+BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;\r
TimeOut_t xTimeOut;\r
Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
\r
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )\r
{\r
traceQUEUE_SEND( pxQueue );\r
- prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
+ xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
\r
#if ( configUSE_QUEUE_SETS == 1 )\r
{\r
mtCOVERAGE_TEST_MARKER();\r
}\r
}\r
+ else if( xYieldRequired != pdFALSE )\r
+ {\r
+ /* This path is a special case that will only get\r
+ executed if the task was holding multiple mutexes\r
+ and the mutexes were given back in an order that is\r
+ different to that in which they were taken. */\r
+ queueYIELD_IF_USING_PREEMPTION();\r
+ }\r
else\r
{\r
mtCOVERAGE_TEST_MARKER();\r
mtCOVERAGE_TEST_MARKER();\r
}\r
}\r
+ else if( xYieldRequired != pdFALSE )\r
+ {\r
+ /* This path is a special case that will only get\r
+ executed if the task was holding multiple mutexes and\r
+ the mutexes were given back in an order that is\r
+ different to that in which they were taken. */\r
+ queueYIELD_IF_USING_PREEMPTION();\r
+ }\r
else\r
{\r
mtCOVERAGE_TEST_MARKER();\r
#endif /* configUSE_QUEUE_SETS */\r
\r
taskEXIT_CRITICAL();\r
-\r
- /* Return to the original privilege level before exiting the\r
- function. */\r
return pdPASS;\r
}\r
else\r
{\r
traceQUEUE_PEEK( pxQueue );\r
\r
- /* We are not removing the data, so reset our read\r
+ /* The data is not being removed, so reset our read\r
pointer. */\r
pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
\r
link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
\r
- /* Similar to xQueueGenericSend, except we don't block if there is no room\r
- in the queue. Also we don't directly wake a task that was blocked on a\r
- queue read, instead we return a flag to say whether a context switch is\r
- required or not (i.e. has a task with a higher priority than us been woken\r
- by this post). */\r
+ /* Similar to xQueueGenericSend, except without blocking if there is no room\r
+ in the queue. Also don't directly wake a task that was blocked on a queue\r
+ read, instead return a flag to say whether a context switch is required or\r
+ not (i.e. has a task with a higher priority than us been woken by this\r
+ post). */\r
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
{\r
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )\r
{\r
traceQUEUE_SEND_FROM_ISR( pxQueue );\r
\r
- prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
+ /* A task can only have an inherited priority if it is a mutex\r
+ holder - and if there is a mutex holder then the mutex cannot be\r
+ given from an ISR. Therefore, unlike the xQueueGenericGive()\r
+ function, there is no need to determine the need for priority\r
+ disinheritance here or to clear the mutex holder TCB member. */\r
+ ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
\r
- /* If the queue is locked we do not alter the event list. This will\r
+ /* The event list is not altered if the queue is locked. This will\r
be done when the queue is unlocked later. */\r
if( pxQueue->xTxLock == queueUNLOCKED )\r
{\r
{\r
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
{\r
- /* The task waiting has a higher priority so record that a\r
- context switch is required. */\r
+ /* The task waiting has a higher priority so\r
+ record that a context switch is required. */\r
+ if( pxHigherPriorityTaskWoken != NULL )\r
+ {\r
+ *pxHigherPriorityTaskWoken = pdTRUE;\r
+ }\r
+ else\r
+ {\r
+ mtCOVERAGE_TEST_MARKER();\r
+ }\r
+ }\r
+ else\r
+ {\r
+ mtCOVERAGE_TEST_MARKER();\r
+ }\r
+ }\r
+ else\r
+ {\r
+ mtCOVERAGE_TEST_MARKER();\r
+ }\r
+ }\r
+ }\r
+ #else /* configUSE_QUEUE_SETS */\r
+ {\r
+ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
+ {\r
+ if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
+ {\r
+ /* The task waiting has a higher priority so record that a\r
+ context switch is required. */\r
+ if( pxHigherPriorityTaskWoken != NULL )\r
+ {\r
+ *pxHigherPriorityTaskWoken = pdTRUE;\r
+ }\r
+ else\r
+ {\r
+ mtCOVERAGE_TEST_MARKER();\r
+ }\r
+ }\r
+ else\r
+ {\r
+ mtCOVERAGE_TEST_MARKER();\r
+ }\r
+ }\r
+ else\r
+ {\r
+ mtCOVERAGE_TEST_MARKER();\r
+ }\r
+ }\r
+ #endif /* configUSE_QUEUE_SETS */\r
+ }\r
+ else\r
+ {\r
+ /* Increment the lock count so the task that unlocks the queue\r
+ knows that data was posted while it was locked. */\r
+ ++( pxQueue->xTxLock );\r
+ }\r
+\r
+ xReturn = pdPASS;\r
+ }\r
+ else\r
+ {\r
+ traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
+ xReturn = errQUEUE_FULL;\r
+ }\r
+ }\r
+ portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
+\r
+ return xReturn;\r
+}\r
+/*-----------------------------------------------------------*/\r
+\r
+BaseType_t xQueueGenericGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )\r
+{\r
+BaseType_t xReturn;\r
+UBaseType_t uxSavedInterruptStatus;\r
+Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
+\r
+ configASSERT( pxQueue );\r
+\r
+ /* xQueueGenericSendFromISR() should be used if the item size is not 0. */
+ configASSERT( pxQueue->uxItemSize == 0 );\r
+\r
+ /* RTOS ports that support interrupt nesting have the concept of a maximum\r
+ system call (or maximum API call) interrupt priority. Interrupts that are\r
+ above the maximum system call priority are kept permanently enabled, even\r
+ when the RTOS kernel is in a critical section, but cannot make any calls to\r
+ FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h\r
+ then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
+ failure if a FreeRTOS API function is called from an interrupt that has been\r
+ assigned a priority above the configured maximum system call priority.\r
+ Only FreeRTOS functions that end in FromISR can be called from interrupts\r
+ that have been assigned a priority at or (logically) below the maximum\r
+ system call interrupt priority. FreeRTOS maintains a separate interrupt\r
+ safe API to ensure interrupt entry is as fast and as simple as possible.\r
+ More information (albeit Cortex-M specific) is provided on the following\r
+ link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
+ portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
+\r
+ /* Similar to xQueueGenericSendFromISR() but used with semaphores where the\r
+ item size is 0. Don't directly wake a task that was blocked on a queue\r
+ read, instead return a flag to say whether a context switch is required or\r
+ not (i.e. has a task with a higher priority than us been woken by this\r
+ post). */\r
+ uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
+ {\r
+ /* When the queue is used to implement a semaphore no data is ever\r
+ moved through the queue but it is still valid to see if the queue 'has\r
+ space'. */\r
+ if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
+ {\r
+ traceQUEUE_SEND_FROM_ISR( pxQueue );\r
+\r
+ /* A task can only have an inherited priority if it is a mutex\r
+ holder - and if there is a mutex holder then the mutex cannot be\r
+ given from an ISR. Therefore, unlike the xQueueGenericGive()\r
+ function, there is no need to determine the need for priority\r
+ disinheritance here or to clear the mutex holder TCB member. */\r
+\r
+ ++( pxQueue->uxMessagesWaiting );\r
+\r
+ /* The event list is not altered if the queue is locked. This will\r
+ be done when the queue is unlocked later. */\r
+ if( pxQueue->xTxLock == queueUNLOCKED )\r
+ {\r
+ #if ( configUSE_QUEUE_SETS == 1 )\r
+ {\r
+ if( pxQueue->pxQueueSetContainer != NULL )\r
+ {\r
+ if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )\r
+ {\r
+ /* The semaphore is a member of a queue set, and\r
+ posting to the queue set caused a higher priority\r
+ task to unblock. A context switch is required. */\r
+ if( pxHigherPriorityTaskWoken != NULL )\r
+ {\r
+ *pxHigherPriorityTaskWoken = pdTRUE;\r
+ }\r
+ else\r
+ {\r
+ mtCOVERAGE_TEST_MARKER();\r
+ }\r
+ }\r
+ else\r
+ {\r
+ mtCOVERAGE_TEST_MARKER();\r
+ }\r
+ }\r
+ else\r
+ {\r
+ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
+ {\r
+ if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
+ {\r
+ /* The task waiting has a higher priority so\r
+ record that a context switch is required. */\r
if( pxHigherPriorityTaskWoken != NULL )\r
{\r
*pxHigherPriorityTaskWoken = pdTRUE;\r
{\r
taskENTER_CRITICAL();\r
{\r
- /* Is there data in the queue now? To be running we must be\r
- the highest priority task wanting to access the queue. */\r
+ /* Is there data in the queue now? To be running the calling task\r
+ must be the highest priority task wanting to access the queue. */\r
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
{\r
/* Remember the read position in case the queue is only being\r
{\r
/* Record the information required to implement\r
priority inheritance should it become necessary. */\r
- pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */\r
+ pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */\r
}\r
else\r
{\r
mtCOVERAGE_TEST_MARKER();\r
}\r
}\r
- #endif\r
+ #endif /* configUSE_MUTEXES */\r
\r
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
{\r
\r
configASSERT( pxQueue );\r
configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
+ configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */\r
\r
/* RTOS ports that support interrupt nesting have the concept of a maximum\r
system call (or maximum API call) interrupt priority. Interrupts that are\r
vQueueUnregisterQueue( pxQueue );\r
}\r
#endif\r
- if( pxQueue->pcHead != NULL )\r
- {\r
- vPortFree( pxQueue->pcHead );\r
- }\r
vPortFree( pxQueue );\r
}\r
/*-----------------------------------------------------------*/\r
#endif /* configUSE_TRACE_FACILITY */\r
/*-----------------------------------------------------------*/\r
\r
-static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )\r
+static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )\r
{\r
+BaseType_t xReturn = pdFALSE;\r
+\r
if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )\r
{\r
#if ( configUSE_MUTEXES == 1 )\r
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
{\r
/* The mutex is no longer being held. */\r
- vTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );\r
+ xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );\r
pxQueue->pxMutexHolder = NULL;\r
}\r
else\r
}\r
\r
++( pxQueue->uxMessagesWaiting );\r
+\r
+ return xReturn;\r
}\r
/*-----------------------------------------------------------*/\r
\r
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )\r
{\r
- if( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX )\r
+ if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )\r
{\r
pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */\r
}\r
( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */\r
}\r
- else\r
- {\r
- mtCOVERAGE_TEST_MARKER();\r
- }\r
}\r
/*-----------------------------------------------------------*/\r
\r
\r
#if ( configQUEUE_REGISTRY_SIZE > 0 )\r
\r
- void vQueueAddToRegistry( QueueHandle_t xQueue, char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
+ void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
{\r
UBaseType_t ux;\r
\r
/* Store the information on this queue. */\r
xQueueRegistry[ ux ].pcQueueName = pcQueueName;\r
xQueueRegistry[ ux ].xHandle = xQueue;\r
+\r
+ traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );\r
break;\r
}\r
else\r
\r
#if ( configUSE_QUEUE_SETS == 1 )\r
\r
- BaseType_t xQueueAddToSet( QueueSetMember_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )\r
+ BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )\r
{\r
BaseType_t xReturn;\r
\r
- if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )\r
- {\r
- /* Cannot add a queue/semaphore to more than one queue set. */\r
- xReturn = pdFAIL;\r
- }\r
- else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )\r
- {\r
- /* Cannot add a queue/semaphore to a queue set if there are already\r
- items in the queue/semaphore. */\r
- xReturn = pdFAIL;\r
- }\r
- else\r
+ taskENTER_CRITICAL();\r
{\r
- taskENTER_CRITICAL();\r
+ if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )\r
+ {\r
+ /* Cannot add a queue/semaphore to more than one queue set. */\r
+ xReturn = pdFAIL;\r
+ }\r
+ else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )\r
+ {\r
+ /* Cannot add a queue/semaphore to a queue set if there are already\r
+ items in the queue/semaphore. */\r
+ xReturn = pdFAIL;\r
+ }\r
+ else\r
{\r
( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;\r
+ xReturn = pdPASS;\r
}\r
- taskEXIT_CRITICAL();\r
- xReturn = pdPASS;\r
}\r
+ taskEXIT_CRITICAL();\r
\r
return xReturn;\r
}\r
\r
#if ( configUSE_QUEUE_SETS == 1 )\r
\r
- BaseType_t xQueueRemoveFromSet( QueueSetMember_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )\r
+ BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )\r
{\r
BaseType_t xReturn;\r
Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;\r
\r
#if ( configUSE_QUEUE_SETS == 1 )\r
\r
- QueueSetMember_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xBlockTimeTicks )\r
+ QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )\r
{\r
- QueueSetMember_t xReturn = NULL;\r
+ QueueSetMemberHandle_t xReturn = NULL;\r
\r
- ( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xBlockTimeTicks, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */\r
+ ( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */\r
return xReturn;\r
}\r
\r
\r
#if ( configUSE_QUEUE_SETS == 1 )\r
\r
- QueueSetMember_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )\r
+ QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )\r
{\r
- QueueSetMember_t xReturn = NULL;\r
+ QueueSetMemberHandle_t xReturn = NULL;\r
\r
( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */\r
return xReturn;\r
Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;\r
BaseType_t xReturn = pdFALSE;\r
\r
+ /* This function must be called from a critical section. */
+\r
configASSERT( pxQueueSetContainer );\r
configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );\r
\r
if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )\r
{\r
traceQUEUE_SEND( pxQueueSetContainer );\r
- /* The data copies is the handle of the queue that contains data. */\r
- prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );\r
+ /* The data copied is the handle of the queue that contains data. */\r
+ xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );\r
+\r
if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )\r
{\r
if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )\r