Kernel changes to improve power saving:

diff --git a/FreeRTOS/Source/queue.c b/FreeRTOS/Source/queue.c
index b571297254f92c1168bfead0d071cdcf8bcfa7c1..931116fb8bed810f488b9f865854ccaf1a5084f2 100644
--- a/FreeRTOS/Source/queue.c
+++ b/FreeRTOS/Source/queue.c
@@ -1,60 +1,64 @@
 /*\r
-    FreeRTOS V8.0.0 - Copyright (C) 2014 Real Time Engineers Ltd.\r
+    FreeRTOS V8.2.1 - Copyright (C) 2015 Real Time Engineers Ltd.\r
     All rights reserved\r
 \r
     VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.\r
 \r
-    ***************************************************************************\r
-     *                                                                       *\r
-     *    FreeRTOS provides completely free yet professionally developed,    *\r
-     *    robust, strictly quality controlled, supported, and cross          *\r
-     *    platform software that has become a de facto standard.             *\r
-     *                                                                       *\r
-     *    Help yourself get started quickly and support the FreeRTOS         *\r
-     *    project by purchasing a FreeRTOS tutorial book, reference          *\r
-     *    manual, or both from: http://www.FreeRTOS.org/Documentation        *\r
-     *                                                                       *\r
-     *    Thank you!                                                         *\r
-     *                                                                       *\r
-    ***************************************************************************\r
-\r
     This file is part of the FreeRTOS distribution.\r
 \r
     FreeRTOS is free software; you can redistribute it and/or modify it under\r
     the terms of the GNU General Public License (version 2) as published by the\r
     Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.\r
 \r
-    >>! NOTE: The modification to the GPL is included to allow you to distribute\r
-    >>! a combined work that includes FreeRTOS without being obliged to provide\r
-    >>! the source code for proprietary components outside of the FreeRTOS\r
-    >>! kernel.\r
+    ***************************************************************************\r
+    >>!   NOTE: The modification to the GPL is included to allow you to     !<<\r
+    >>!   distribute a combined work that includes FreeRTOS without being   !<<\r
+    >>!   obliged to provide the source code for proprietary components     !<<\r
+    >>!   outside of the FreeRTOS kernel.                                   !<<\r
+    ***************************************************************************\r
 \r
     FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY\r
     WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\r
-    FOR A PARTICULAR PURPOSE.  Full license text is available from the following\r
+    FOR A PARTICULAR PURPOSE.  Full license text is available on the following\r
     link: http://www.freertos.org/a00114.html\r
 \r
-    1 tab == 4 spaces!\r
-\r
     ***************************************************************************\r
      *                                                                       *\r
-     *    Having a problem?  Start by reading the FAQ "My application does   *\r
-     *    not run, what could be wrong?"                                     *\r
+     *    FreeRTOS provides completely free yet professionally developed,    *\r
+     *    robust, strictly quality controlled, supported, and cross          *\r
+     *    platform software that is more than just the market leader, it     *\r
+     *    is the industry's de facto standard.                               *\r
      *                                                                       *\r
-     *    http://www.FreeRTOS.org/FAQHelp.html                               *\r
+     *    Help yourself get started quickly while simultaneously helping     *\r
+     *    to support the FreeRTOS project by purchasing a FreeRTOS           *\r
+     *    tutorial book, reference manual, or both:                          *\r
+     *    http://www.FreeRTOS.org/Documentation                              *\r
      *                                                                       *\r
     ***************************************************************************\r
 \r
-    http://www.FreeRTOS.org - Documentation, books, training, latest versions,\r
-    license and Real Time Engineers Ltd. contact details.\r
+    http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading\r
+    the FAQ page "My application does not run, what could be wrong?".  Have you\r
+    defined configASSERT()?\r
+\r
+    http://www.FreeRTOS.org/support - In return for receiving this top quality\r
+    embedded software for free we request you assist our global community by\r
+    participating in the support forum.\r
+\r
+    http://www.FreeRTOS.org/training - Investing in training allows your team to\r
+    be as productive as possible as early as possible.  Now you can receive\r
+    FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers\r
+    Ltd, and the world's leading authority on the world's leading RTOS.\r
 \r
     http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,\r
     including FreeRTOS+Trace - an indispensable productivity tool, a DOS\r
     compatible FAT file system, and our tiny thread aware UDP/IP stack.\r
 \r
-    http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High\r
-    Integrity Systems to sell under the OpenRTOS brand.  Low cost OpenRTOS\r
-    licenses offer ticketed support, indemnification and middleware.\r
+    http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.\r
+    Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.\r
+\r
+    http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High\r
+    Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS\r
+    licenses offer ticketed support, indemnification and commercial middleware.\r
 \r
     http://www.SafeRTOS.com - High Integrity Systems also provide a safety\r
     engineered and independently SIL3 certified version for use in safety and\r
@@ -120,7 +124,8 @@ zero. */
 \r
 /*\r
  * Definition of the queue used by the scheduler.\r
- * Items are queued by copy, not reference.\r
+ * Items are queued by copy, not reference.  See the following link for the\r
+ * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html\r
  */\r
 typedef struct QueueDefinition\r
 {\r
@@ -153,7 +158,12 @@ typedef struct QueueDefinition
                struct QueueDefinition *pxQueueSetContainer;\r
        #endif\r
 \r
-} Queue_t;\r
+} xQUEUE;\r
+\r
+/* The old xQUEUE name is maintained above then typedefed to the new Queue_t\r
+name below to enable the use of older kernel aware debuggers. */\r
+typedef xQUEUE Queue_t;\r
+\r
 /*-----------------------------------------------------------*/\r
 \r
 /*\r
@@ -169,7 +179,12 @@ typedef struct QueueDefinition
        {\r
                const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
                QueueHandle_t xHandle;\r
-       } QueueRegistryItem_t;\r
+       } xQueueRegistryItem;\r
+\r
+       /* The old xQueueRegistryItem name is maintained above then typedefed to the\r
+       new QueueRegistryItem_t name below to enable the use of older kernel aware\r
+       debuggers. */\r
+       typedef xQueueRegistryItem QueueRegistryItem_t;\r
 \r
        /* The queue registry is simply an array of QueueRegistryItem_t structures.\r
        The pcQueueName member of a structure being NULL is indicative of the\r
@@ -206,7 +221,7 @@ static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
  * Copies an item into the queue, either at the front of the queue or the\r
  * back of the queue.\r
  */\r
-static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;\r
+static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;\r
 \r
 /*\r
  * Copies an item out of a queue.\r
@@ -300,55 +315,68 @@ QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseT
 Queue_t *pxNewQueue;\r
 size_t xQueueSizeInBytes;\r
 QueueHandle_t xReturn = NULL;\r
+int8_t *pcAllocatedBuffer;\r
 \r
        /* Remove compiler warnings about unused parameters should\r
        configUSE_TRACE_FACILITY not be set to 1. */\r
        ( void ) ucQueueType;\r
 \r
-       /* Allocate the new queue structure. */\r
-       if( uxQueueLength > ( UBaseType_t ) 0 )\r
-       {\r
-               pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );\r
-               if( pxNewQueue != NULL )\r
-               {\r
-                       /* Create the list of pointers to queue items.  The queue is one byte\r
-                       longer than asked for to make wrap checking easier/faster. */\r
-                       xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
+       configASSERT( uxQueueLength > ( UBaseType_t ) 0 );\r
 \r
-                       pxNewQueue->pcHead = ( int8_t * ) pvPortMalloc( xQueueSizeInBytes );\r
-                       if( pxNewQueue->pcHead != NULL )\r
-                       {\r
-                               /* Initialise the queue members as described above where the\r
-                               queue type is defined. */\r
-                               pxNewQueue->uxLength = uxQueueLength;\r
-                               pxNewQueue->uxItemSize = uxItemSize;\r
-                               ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );\r
+       if( uxItemSize == ( UBaseType_t ) 0 )\r
+       {\r
+               /* There is not going to be a queue storage area. */\r
+               xQueueSizeInBytes = ( size_t ) 0;\r
+       }\r
+       else\r
+       {\r
+               /* The queue is one byte longer than asked for to make wrap checking\r
+               easier/faster. */\r
+               xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
+       }\r
 \r
-                               #if ( configUSE_TRACE_FACILITY == 1 )\r
-                               {\r
-                                       pxNewQueue->ucQueueType = ucQueueType;\r
-                               }\r
-                               #endif /* configUSE_TRACE_FACILITY */\r
+       /* Allocate the new queue structure and storage area. */\r
+       pcAllocatedBuffer = ( int8_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );\r
 \r
-                               #if( configUSE_QUEUE_SETS == 1 )\r
-                               {\r
-                                       pxNewQueue->pxQueueSetContainer = NULL;\r
-                               }\r
-                               #endif /* configUSE_QUEUE_SETS */\r
+       if( pcAllocatedBuffer != NULL )\r
+       {\r
+               pxNewQueue = ( Queue_t * ) pcAllocatedBuffer; /*lint !e826 MISRA The buffer cannot be too small because it was dimensioned by sizeof( Queue_t ) + xQueueSizeInBytes. */\r
 \r
-                               traceQUEUE_CREATE( pxNewQueue );\r
-                               xReturn = pxNewQueue;\r
-                       }\r
-                       else\r
-                       {\r
-                               traceQUEUE_CREATE_FAILED( ucQueueType );\r
-                               vPortFree( pxNewQueue );\r
-                       }\r
+               if( uxItemSize == ( UBaseType_t ) 0 )\r
+               {\r
+                       /* No RAM was allocated for the queue storage area, but pcHead\r
+                       cannot be set to NULL because NULL is used as a key to say the queue\r
+                       is used as a mutex.  Therefore just set pcHead to point to the queue\r
+                       as a benign value that is known to be within the memory map. */\r
+                       pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;\r
                }\r
                else\r
                {\r
-                       mtCOVERAGE_TEST_MARKER();\r
+                       /* Jump past the queue structure to find the location of the queue\r
+                       storage area - adding the padding bytes to get a better alignment. */\r
+                       pxNewQueue->pcHead = pcAllocatedBuffer + sizeof( Queue_t );\r
+               }\r
+\r
+               /* Initialise the queue members as described above where the queue type\r
+               is defined. */\r
+               pxNewQueue->uxLength = uxQueueLength;\r
+               pxNewQueue->uxItemSize = uxItemSize;\r
+               ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );\r
+\r
+               #if ( configUSE_TRACE_FACILITY == 1 )\r
+               {\r
+                       pxNewQueue->ucQueueType = ucQueueType;\r
                }\r
+               #endif /* configUSE_TRACE_FACILITY */\r
+\r
+               #if( configUSE_QUEUE_SETS == 1 )\r
+               {\r
+                       pxNewQueue->pxQueueSetContainer = NULL;\r
+               }\r
+               #endif /* configUSE_QUEUE_SETS */\r
+\r
+               traceQUEUE_CREATE( pxNewQueue );\r
+               xReturn = pxNewQueue;\r
        }\r
        else\r
        {\r
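An illustrative fragment (not the kernel source itself; helper name and layout comments are assumptions) of the single-allocation scheme introduced above: one pvPortMalloc() call now provides both the Queue_t structure and the storage area that follows it, and pcHead points at the structure itself when the item size is zero because NULL is reserved to mark a mutex:

    /* Sketch only - assumes the queue.c internal types are visible. */
    static Queue_t *prvSketchCreate( UBaseType_t uxQueueLength, UBaseType_t uxItemSize )
    {
    size_t xStorageBytes;
    int8_t *pcBuffer;
    Queue_t *pxQueue = NULL;

        /* Zero item size (semaphore/mutex) needs no storage area; otherwise one
        spare byte makes the wrap-around check cheaper. */
        xStorageBytes = ( uxItemSize == 0 ) ? ( size_t ) 0 : ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1;

        /* One allocation holds the structure and, directly after it, the storage. */
        pcBuffer = ( int8_t * ) pvPortMalloc( sizeof( Queue_t ) + xStorageBytes );

        if( pcBuffer != NULL )
        {
            pxQueue = ( Queue_t * ) pcBuffer;

            /* NULL pcHead is reserved to mean "this queue is a mutex", so a
            storage-less queue points pcHead at itself instead. */
            pxQueue->pcHead = ( uxItemSize == 0 ) ? ( int8_t * ) pxQueue : pcBuffer + sizeof( Queue_t );
        }

        return pxQueue;
    }
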
@@ -451,7 +479,7 @@ QueueHandle_t xReturn = NULL;
                taskEXIT_CRITICAL();\r
 \r
                return pxReturn;\r
-       }\r
+       } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */\r
 \r
 #endif\r
 /*-----------------------------------------------------------*/\r
@@ -498,7 +526,8 @@ QueueHandle_t xReturn = NULL;
                }\r
                else\r
                {\r
-                       /* We cannot give the mutex because we are not the holder. */\r
+                       /* The mutex cannot be given because the calling task is not the\r
+                       holder. */\r
                        xReturn = pdFAIL;\r
 \r
                        traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );\r
@@ -533,8 +562,9 @@ QueueHandle_t xReturn = NULL;
                {\r
                        xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );\r
 \r
-                       /* pdPASS will only be returned if we successfully obtained the mutex,\r
-                       we may have blocked to reach here. */\r
+                       /* pdPASS will only be returned if the mutex was successfully\r
+                       obtained.  The calling task may have entered the Blocked state\r
+                       before reaching here. */\r
                        if( xReturn == pdPASS )\r
                        {\r
                                ( pxMutex->u.uxRecursiveCallCount )++;\r
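For context, a short usage sketch of the recursive mutex API in semphr.h that sits above xQueueTakeMutexRecursive()/xQueueGiveMutexRecursive(); it assumes configUSE_RECURSIVE_MUTEXES is 1 and that xRecMutex was created with xSemaphoreCreateRecursiveMutex() before the task runs. Every take must be balanced by a give before the mutex is released to other tasks:

    #include "FreeRTOS.h"
    #include "semphr.h"

    static SemaphoreHandle_t xRecMutex; /* created elsewhere with xSemaphoreCreateRecursiveMutex() */

    static void prvWorkerTask( void *pvParameters )
    {
        ( void ) pvParameters;

        for( ;; )
        {
            if( xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY ) == pdPASS )
            {
                /* Taking the mutex again from the holding task just increments
                uxRecursiveCallCount instead of blocking. */
                xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY );

                /* ...use the guarded resource... */

                /* Each take is balanced by a give; the mutex only becomes
                available to other tasks after the final give. */
                xSemaphoreGiveRecursive( xRecMutex );
                xSemaphoreGiveRecursive( xRecMutex );
            }
        }
    }
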
@@ -582,7 +612,7 @@ QueueHandle_t xReturn = NULL;
 \r
 BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )\r
 {\r
-BaseType_t xEntryTimeSet = pdFALSE;\r
+BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;\r
 TimeOut_t xTimeOut;\r
 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
 \r
@@ -603,14 +633,14 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
        {\r
                taskENTER_CRITICAL();\r
                {\r
-                       /* Is there room on the queue now?  The running task must be\r
-                       the highest priority task wanting to access the queue.  If\r
-                       the head item in the queue is to be overwritten then it does\r
-                       not matter if the queue is full. */\r
+                       /* Is there room on the queue now?  The running task must be the\r
+                       highest priority task wanting to access the queue.  If the head item\r
+                       in the queue is to be overwritten then it does not matter if the\r
+                       queue is full. */\r
                        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )\r
                        {\r
                                traceQUEUE_SEND( pxQueue );\r
-                               prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
+                               xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
 \r
                                #if ( configUSE_QUEUE_SETS == 1 )\r
                                {\r
@@ -647,6 +677,14 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                                                mtCOVERAGE_TEST_MARKER();\r
                                                        }\r
                                                }\r
+                                               else if( xYieldRequired != pdFALSE )\r
+                                               {\r
+                                                       /* This path is a special case that will only get\r
+                                                       executed if the task was holding multiple mutexes\r
+                                                       and the mutexes were given back in an order that is\r
+                                                       different to that in which they were taken. */\r
+                                                       queueYIELD_IF_USING_PREEMPTION();\r
+                                               }\r
                                                else\r
                                                {\r
                                                        mtCOVERAGE_TEST_MARKER();\r
@@ -672,6 +710,14 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                                        mtCOVERAGE_TEST_MARKER();\r
                                                }\r
                                        }\r
+                                       else if( xYieldRequired != pdFALSE )\r
+                                       {\r
+                                               /* This path is a special case that will only get\r
+                                               executed if the task was holding multiple mutexes and\r
+                                               the mutexes were given back in an order that is\r
+                                               different to that in which they were taken. */\r
+                                               queueYIELD_IF_USING_PREEMPTION();\r
+                                       }\r
                                        else\r
                                        {\r
                                                mtCOVERAGE_TEST_MARKER();\r
@@ -680,9 +726,6 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                #endif /* configUSE_QUEUE_SETS */\r
 \r
                                taskEXIT_CRITICAL();\r
-\r
-                               /* Return to the original privilege level before exiting the\r
-                               function. */\r
                                return pdPASS;\r
                        }\r
                        else\r
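The new xYieldRequired path above only fires in one situation: the copy released a mutex and prvCopyDataToQueue() reported that priority disinheritance occurred, which can happen when a task holds several mutexes and gives them back in a different order to the one in which they were taken. An application-level sketch of that scenario (mutex names are placeholders, not kernel code):

    /* The task gives the mutexes back in take order rather than the usual
    reverse order.  Giving xMutexA while still holding xMutexB may lower
    (disinherit) this task's priority, so xQueueGenericSend() must be able
    to request a yield. */
    xSemaphoreTake( xMutexA, portMAX_DELAY );
    xSemaphoreTake( xMutexB, portMAX_DELAY );

    /* ...access the resources guarded by both mutexes... */

    xSemaphoreGive( xMutexA );  /* out-of-order give - disinheritance possible here */
    xSemaphoreGive( xMutexB );
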
@@ -918,7 +961,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                        {\r
                                                traceQUEUE_PEEK( pxQueue );\r
 \r
-                                               /* We are not removing the data, so reset our read\r
+                                               /* The data is not being removed, so reset our read\r
                                                pointer. */\r
                                                pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
 \r
@@ -1040,8 +1083,8 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
 \r
        /* Similar to xQueueGenericSend, except without blocking if there is no room\r
        in the queue.  Also don't directly wake a task that was blocked on a queue\r
-       read, instead return a flag to say whether a context switch is required or \r
-       not (i.e. has a task with a higher priority than us been woken by this \r
+       read, instead return a flag to say whether a context switch is required or\r
+       not (i.e. has a task with a higher priority than us been woken by this\r
        post). */\r
        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
        {\r
@@ -1049,7 +1092,12 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                {\r
                        traceQUEUE_SEND_FROM_ISR( pxQueue );\r
 \r
-                       prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
+                       /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a\r
+                       semaphore or mutex.  That means prvCopyDataToQueue() cannot result\r
+                       in a task disinheriting a priority and prvCopyDataToQueue() can be\r
+                       called here even though the disinherit function does not check if\r
+                       the scheduler is suspended before accessing the ready lists. */\r
+                       ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
 \r
                        /* The event list is not altered if the queue is locked.  This will\r
                        be done when the queue is unlocked later. */\r
@@ -1084,8 +1132,169 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                                {\r
                                                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
                                                        {\r
-                                                               /* The task waiting has a higher priority so record that a\r
-                                                               context switch is required. */\r
+                                                               /* The task waiting has a higher priority so\r
+                                                               record that a context switch is required. */\r
+                                                               if( pxHigherPriorityTaskWoken != NULL )\r
+                                                               {\r
+                                                                       *pxHigherPriorityTaskWoken = pdTRUE;\r
+                                                               }\r
+                                                               else\r
+                                                               {\r
+                                                                       mtCOVERAGE_TEST_MARKER();\r
+                                                               }\r
+                                                       }\r
+                                                       else\r
+                                                       {\r
+                                                               mtCOVERAGE_TEST_MARKER();\r
+                                                       }\r
+                                               }\r
+                                               else\r
+                                               {\r
+                                                       mtCOVERAGE_TEST_MARKER();\r
+                                               }\r
+                                       }\r
+                               }\r
+                               #else /* configUSE_QUEUE_SETS */\r
+                               {\r
+                                       if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
+                                       {\r
+                                               if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
+                                               {\r
+                                                       /* The task waiting has a higher priority so record that a\r
+                                                       context switch is required. */\r
+                                                       if( pxHigherPriorityTaskWoken != NULL )\r
+                                                       {\r
+                                                               *pxHigherPriorityTaskWoken = pdTRUE;\r
+                                                       }\r
+                                                       else\r
+                                                       {\r
+                                                               mtCOVERAGE_TEST_MARKER();\r
+                                                       }\r
+                                               }\r
+                                               else\r
+                                               {\r
+                                                       mtCOVERAGE_TEST_MARKER();\r
+                                               }\r
+                                       }\r
+                                       else\r
+                                       {\r
+                                               mtCOVERAGE_TEST_MARKER();\r
+                                       }\r
+                               }\r
+                               #endif /* configUSE_QUEUE_SETS */\r
+                       }\r
+                       else\r
+                       {\r
+                               /* Increment the lock count so the task that unlocks the queue\r
+                               knows that data was posted while it was locked. */\r
+                               ++( pxQueue->xTxLock );\r
+                       }\r
+\r
+                       xReturn = pdPASS;\r
+               }\r
+               else\r
+               {\r
+                       traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
+                       xReturn = errQUEUE_FULL;\r
+               }\r
+       }\r
+       portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
+\r
+       return xReturn;\r
+}\r
+/*-----------------------------------------------------------*/\r
+\r
+BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )\r
+{\r
+BaseType_t xReturn;\r
+UBaseType_t uxSavedInterruptStatus;\r
+Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
+\r
+       /* Similar to xQueueGenericSendFromISR() but used with semaphores where the\r
+       item size is 0.  Don't directly wake a task that was blocked on a queue\r
+       read, instead return a flag to say whether a context switch is required or\r
+       not (i.e. has a task with a higher priority than us been woken by this\r
+       post). */\r
+\r
+       configASSERT( pxQueue );\r
+\r
+       /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()\r
+       if the item size is not 0. */\r
+       configASSERT( pxQueue->uxItemSize == 0 );\r
+\r
+       /* Normally a mutex would not be given from an interrupt, and doing so is\r
+       definitely wrong if there is a mutex holder as priority inheritance makes no\r
+       sense for an interrupt, only tasks. */\r
+       configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );\r
+\r
+       /* RTOS ports that support interrupt nesting have the concept of a maximum\r
+       system call (or maximum API call) interrupt priority.  Interrupts that are\r
+       above the maximum system call priority are kept permanently enabled, even\r
+       when the RTOS kernel is in a critical section, but cannot make any calls to\r
+       FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
+       then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
+       failure if a FreeRTOS API function is called from an interrupt that has been\r
+       assigned a priority above the configured maximum system call priority.\r
+       Only FreeRTOS functions that end in FromISR can be called from interrupts\r
+       that have been assigned a priority at or (logically) below the maximum\r
+       system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
+       safe API to ensure interrupt entry is as fast and as simple as possible.\r
+       More information (albeit Cortex-M specific) is provided on the following\r
+       link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
+       portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
+\r
+       uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
+       {\r
+               /* When the queue is used to implement a semaphore no data is ever\r
+               moved through the queue but it is still valid to see if the queue 'has\r
+               space'. */\r
+               if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
+               {\r
+                       traceQUEUE_SEND_FROM_ISR( pxQueue );\r
+\r
+                       /* A task can only have an inherited priority if it is a mutex\r
+                       holder - and if there is a mutex holder then the mutex cannot be\r
+                       given from an ISR.  As this is the ISR version of the function it\r
+                       can be assumed there is no mutex holder and no need to determine if\r
+                       priority disinheritance is needed.  Simply increase the count of\r
+                       messages (semaphores) available. */\r
+                       ++( pxQueue->uxMessagesWaiting );\r
+\r
+                       /* The event list is not altered if the queue is locked.  This will\r
+                       be done when the queue is unlocked later. */\r
+                       if( pxQueue->xTxLock == queueUNLOCKED )\r
+                       {\r
+                               #if ( configUSE_QUEUE_SETS == 1 )\r
+                               {\r
+                                       if( pxQueue->pxQueueSetContainer != NULL )\r
+                                       {\r
+                                               if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )\r
+                                               {\r
+                                                       /* The semaphore is a member of a queue set, and\r
+                                                       posting to the queue set caused a higher priority\r
+                                                       task to unblock.  A context switch is required. */\r
+                                                       if( pxHigherPriorityTaskWoken != NULL )\r
+                                                       {\r
+                                                               *pxHigherPriorityTaskWoken = pdTRUE;\r
+                                                       }\r
+                                                       else\r
+                                                       {\r
+                                                               mtCOVERAGE_TEST_MARKER();\r
+                                                       }\r
+                                               }\r
+                                               else\r
+                                               {\r
+                                                       mtCOVERAGE_TEST_MARKER();\r
+                                               }\r
+                                       }\r
+                                       else\r
+                                       {\r
+                                               if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
+                                               {\r
+                                                       if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
+                                                       {\r
+                                                               /* The task waiting has a higher priority so\r
+                                                               record that a context switch is required. */\r
                                                                if( pxHigherPriorityTaskWoken != NULL )\r
                                                                {\r
                                                                        *pxHigherPriorityTaskWoken = pdTRUE;\r
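The new xQueueGiveFromISR() is normally reached through the xSemaphoreGiveFromISR() macro in semphr.h. A minimal usage sketch, assuming the handler name and semaphore handle shown here (and that the port provides portYIELD_FROM_ISR()):

    /* Created elsewhere, e.g. with xSemaphoreCreateBinary(). */
    extern SemaphoreHandle_t xBinarySemaphore;

    void vExampleISR( void )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        /* Give the semaphore to unblock a waiting task.  No data is copied,
        which is why the item size is asserted to be zero above. */
        xSemaphoreGiveFromISR( xBinarySemaphore, &xHigherPriorityTaskWoken );

        /* Request a context switch on interrupt exit if the give unblocked a
        task of higher priority than the one that was interrupted. */
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
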
@@ -1179,8 +1388,8 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
        {\r
                taskENTER_CRITICAL();\r
                {\r
-                       /* Is there data in the queue now?  To be running we must be\r
-                       the highest priority task wanting to access the queue. */\r
+                       /* Is there data in the queue now?  To be running the calling task\r
+                       must be the highest priority task wanting to access the queue. */\r
                        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
                        {\r
                                /* Remember the read position in case the queue is only being\r
@@ -1202,14 +1411,14 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                                {\r
                                                        /* Record the information required to implement\r
                                                        priority inheritance should it become necessary. */\r
-                                                       pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */\r
+                                                       pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */\r
                                                }\r
                                                else\r
                                                {\r
                                                        mtCOVERAGE_TEST_MARKER();\r
                                                }\r
                                        }\r
-                                       #endif\r
+                                       #endif /* configUSE_MUTEXES */\r
 \r
                                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
                                        {\r
@@ -1239,8 +1448,6 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                        any other tasks waiting for the data. */\r
                                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
                                        {\r
-                                               /* Tasks that are removed from the event list will get added to\r
-                                               the pending ready list as the scheduler is still suspended. */\r
                                                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
                                                {\r
                                                        /* The task waiting has a higher priority than this task. */\r
@@ -1441,6 +1648,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
 \r
        configASSERT( pxQueue );\r
        configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
+       configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */\r
 \r
        /* RTOS ports that support interrupt nesting have the concept of a maximum\r
        system call (or maximum API call) interrupt priority.  Interrupts that are\r
@@ -1543,10 +1751,6 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                vQueueUnregisterQueue( pxQueue );\r
        }\r
        #endif\r
-       if( pxQueue->pcHead != NULL )\r
-       {\r
-               vPortFree( pxQueue->pcHead );\r
-       }\r
        vPortFree( pxQueue );\r
 }\r
 /*-----------------------------------------------------------*/\r
@@ -1581,8 +1785,10 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
 #endif /* configUSE_TRACE_FACILITY */\r
 /*-----------------------------------------------------------*/\r
 \r
-static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )\r
+static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )\r
 {\r
+BaseType_t xReturn = pdFALSE;\r
+\r
        if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )\r
        {\r
                #if ( configUSE_MUTEXES == 1 )\r
@@ -1590,7 +1796,7 @@ static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQue
                        if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
                        {\r
                                /* The mutex is no longer being held. */\r
-                               vTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );\r
+                               xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );\r
                                pxQueue->pxMutexHolder = NULL;\r
                        }\r
                        else\r
@@ -1648,12 +1854,14 @@ static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQue
        }\r
 \r
        ++( pxQueue->uxMessagesWaiting );\r
+\r
+       return xReturn;\r
 }\r
 /*-----------------------------------------------------------*/\r
 \r
 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )\r
 {\r
-       if( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX )\r
+       if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )\r
        {\r
                pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
                if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */\r
@@ -1666,10 +1874,6 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer
                }\r
                ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */\r
        }\r
-       else\r
-       {\r
-               mtCOVERAGE_TEST_MARKER();\r
-       }\r
 }\r
 /*-----------------------------------------------------------*/\r
 \r
@@ -2199,7 +2403,7 @@ BaseType_t xReturn;
 \r
 #if ( configUSE_TIMERS == 1 )\r
 \r
-       void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )\r
+       void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )\r
        {\r
        Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
 \r
@@ -2221,7 +2425,7 @@ BaseType_t xReturn;
                if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )\r
                {\r
                        /* There is nothing in the queue, block for the specified period. */\r
-                       vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );\r
+                       vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );\r
                }\r
                else\r
                {\r
@@ -2350,21 +2554,30 @@ BaseType_t xReturn;
        BaseType_t xReturn = pdFALSE;\r
 \r
                /* This function must be called from a critical section. */\r
-       \r
+\r
                configASSERT( pxQueueSetContainer );\r
                configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );\r
 \r
                if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )\r
                {\r
                        traceQUEUE_SEND( pxQueueSetContainer );\r
-                       /* The data copies is the handle of the queue that contains data. */\r
-                       prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );\r
-                       if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )\r
+\r
+                       /* The data copied is the handle of the queue that contains data. */\r
+                       xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );\r
+\r
+                       if( pxQueueSetContainer->xTxLock == queueUNLOCKED )\r
                        {\r
-                               if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )\r
+                               if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )\r
                                {\r
-                                       /* The task waiting has a higher priority */\r
-                                       xReturn = pdTRUE;\r
+                                       if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )\r
+                                       {\r
+                                               /* The task waiting has a higher priority. */\r
+                                               xReturn = pdTRUE;\r
+                                       }\r
+                                       else\r
+                                       {\r
+                                               mtCOVERAGE_TEST_MARKER();\r
+                                       }\r
                                }\r
                                else\r
                                {\r
@@ -2373,7 +2586,7 @@ BaseType_t xReturn;
                        }\r
                        else\r
                        {\r
-                               mtCOVERAGE_TEST_MARKER();\r
+                               ( pxQueueSetContainer->xTxLock )++;\r
                        }\r
                }\r
                else\r