git.sur5r.net Git - freertos/blobdiff - FreeRTOS/Source/queue.c
+ New feature added: Task notifications.
[freertos] / FreeRTOS / Source / queue.c
index 278178fe099cb1d540b58e51c7c983fb79fac399..a7c6e749c54dd4cf55fc7f12fa51d127058d197d 100644 (file)
@@ -1,5 +1,5 @@
 /*\r
-    FreeRTOS V8.0.1 - Copyright (C) 2014 Real Time Engineers Ltd.\r
+    FreeRTOS V8.1.2 - Copyright (C) 2014 Real Time Engineers Ltd.\r
     All rights reserved\r
 \r
     VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.\r
@@ -120,7 +120,8 @@ zero. */
 \r
 /*\r
  * Definition of the queue used by the scheduler.\r
- * Items are queued by copy, not reference.\r
+ * Items are queued by copy, not reference.  See the following link for the\r
+ * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html\r
  */\r
 typedef struct QueueDefinition\r
 {\r
@@ -310,55 +311,68 @@ QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseT
 Queue_t *pxNewQueue;\r
 size_t xQueueSizeInBytes;\r
 QueueHandle_t xReturn = NULL;\r
+int8_t *pcAllocatedBuffer;\r
 \r
        /* Remove compiler warnings about unused parameters should\r
        configUSE_TRACE_FACILITY not be set to 1. */\r
        ( void ) ucQueueType;\r
 \r
-       /* Allocate the new queue structure. */\r
-       if( uxQueueLength > ( UBaseType_t ) 0 )\r
-       {\r
-               pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );\r
-               if( pxNewQueue != NULL )\r
-               {\r
-                       /* Create the list of pointers to queue items.  The queue is one byte\r
-                       longer than asked for to make wrap checking easier/faster. */\r
-                       xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
+       configASSERT( uxQueueLength > ( UBaseType_t ) 0 );\r
 \r
-                       pxNewQueue->pcHead = ( int8_t * ) pvPortMalloc( xQueueSizeInBytes );\r
-                       if( pxNewQueue->pcHead != NULL )\r
-                       {\r
-                               /* Initialise the queue members as described above where the\r
-                               queue type is defined. */\r
-                               pxNewQueue->uxLength = uxQueueLength;\r
-                               pxNewQueue->uxItemSize = uxItemSize;\r
-                               ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );\r
+       if( uxItemSize == ( UBaseType_t ) 0 )\r
+       {\r
+               /* There is not going to be a queue storage area. */\r
+               xQueueSizeInBytes = ( size_t ) 0;\r
+       }\r
+       else\r
+       {\r
+               /* The queue is one byte longer than asked for to make wrap checking\r
+               easier/faster. */\r
+               xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
+       }\r
 \r
-                               #if ( configUSE_TRACE_FACILITY == 1 )\r
-                               {\r
-                                       pxNewQueue->ucQueueType = ucQueueType;\r
-                               }\r
-                               #endif /* configUSE_TRACE_FACILITY */\r
+       /* Allocate the new queue structure and storage area. */\r
+       pcAllocatedBuffer = ( int8_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );\r
 \r
-                               #if( configUSE_QUEUE_SETS == 1 )\r
-                               {\r
-                                       pxNewQueue->pxQueueSetContainer = NULL;\r
-                               }\r
-                               #endif /* configUSE_QUEUE_SETS */\r
+       if( pcAllocatedBuffer != NULL )\r
+       {\r
+               pxNewQueue = ( Queue_t * ) pcAllocatedBuffer; /*lint !e826 MISRA The buffer cannot be to small because it was dimensioned by sizeof( Queue_t ) + xQueueSizeInBytes. */\r
 \r
-                               traceQUEUE_CREATE( pxNewQueue );\r
-                               xReturn = pxNewQueue;\r
-                       }\r
-                       else\r
-                       {\r
-                               traceQUEUE_CREATE_FAILED( ucQueueType );\r
-                               vPortFree( pxNewQueue );\r
-                       }\r
+               if( uxItemSize == ( UBaseType_t ) 0 )\r
+               {\r
+                       /* No RAM was allocated for the queue storage area, but pcHead\r
+                       cannot be set to NULL because NULL is used as a key to say the queue\r
+                       is used as a mutex.  Therefore just set pcHead to point to the queue\r
+                       as a benign value that is known to be within the memory map. */\r
+                       pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;\r
                }\r
                else\r
                {\r
-                       mtCOVERAGE_TEST_MARKER();\r
+                       /* Jump past the queue structure to find the location of the queue\r
+                       storage area - adding the padding bytes to get a better alignment. */\r
+                       pxNewQueue->pcHead = pcAllocatedBuffer + sizeof( Queue_t );\r
                }\r
+\r
+               /* Initialise the queue members as described above where the queue type\r
+               is defined. */\r
+               pxNewQueue->uxLength = uxQueueLength;\r
+               pxNewQueue->uxItemSize = uxItemSize;\r
+               ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );\r
+\r
+               #if ( configUSE_TRACE_FACILITY == 1 )\r
+               {\r
+                       pxNewQueue->ucQueueType = ucQueueType;\r
+               }\r
+               #endif /* configUSE_TRACE_FACILITY */\r
+\r
+               #if( configUSE_QUEUE_SETS == 1 )\r
+               {\r
+                       pxNewQueue->pxQueueSetContainer = NULL;\r
+               }\r
+               #endif /* configUSE_QUEUE_SETS */\r
+\r
+               traceQUEUE_CREATE( pxNewQueue );\r
+               xReturn = pxNewQueue;\r
        }\r
        else\r
        {\r
@@ -421,10 +435,7 @@ QueueHandle_t xReturn = NULL;
 \r
                        traceCREATE_MUTEX( pxNewQueue );\r
 \r
-                       /* Start with the semaphore in the expected state.  Preload the\r
-                        mutex held count as calling xQueueGenericSend() will decrement the\r
-                        count back to 0. */\r
-                       vTaskIncrementMutexHeldCount();\r
+                       /* Start with the semaphore in the expected state. */\r
                        ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );\r
                }\r
                else\r
@@ -464,7 +475,7 @@ QueueHandle_t xReturn = NULL;
                taskEXIT_CRITICAL();\r
 \r
                return pxReturn;\r
-       }\r
+       } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */\r
 \r
 #endif\r
 /*-----------------------------------------------------------*/\r
@@ -702,7 +713,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                                the mutexes were given back in an order that is\r
                                                different to that in which they were taken. */\r
                                                queueYIELD_IF_USING_PREEMPTION();\r
-                                       }                                       \r
+                                       }\r
                                        else\r
                                        {\r
                                                mtCOVERAGE_TEST_MARKER();\r
@@ -946,7 +957,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                        {\r
                                                traceQUEUE_PEEK( pxQueue );\r
 \r
-                                               /* We are not removing the data, so reset our read\r
+                                               /* The data is not being removed, so reset our read\r
                                                pointer. */\r
                                                pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
 \r
@@ -1077,20 +1088,166 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                {\r
                        traceQUEUE_SEND_FROM_ISR( pxQueue );\r
 \r
-                       if( prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ) != pdFALSE )\r
+                       /* A task can only have an inherited priority if it is a mutex\r
+                       holder - and if there is a mutex holder then the mutex cannot be\r
+                       given from an ISR.  Therefore, unlike the xQueueGenericGive()\r
+                       function, there is no need to determine the need for priority\r
+                       disinheritance here or to clear the mutex holder TCB member. */\r
+                       ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
+\r
+                       /* The event list is not altered if the queue is locked.  This will\r
+                       be done when the queue is unlocked later. */\r
+                       if( pxQueue->xTxLock == queueUNLOCKED )\r
                        {\r
-                               /* This is a special case that can only be executed if a task\r
-                               holds multiple mutexes and then gives the mutexes back in an\r
-                               order that is different to that in which they were taken. */\r
-                               if( pxHigherPriorityTaskWoken != NULL )\r
+                               #if ( configUSE_QUEUE_SETS == 1 )\r
                                {\r
-                                       *pxHigherPriorityTaskWoken = pdTRUE;\r
+                                       if( pxQueue->pxQueueSetContainer != NULL )\r
+                                       {\r
+                                               if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )\r
+                                               {\r
+                                                       /* The queue is a member of a queue set, and posting\r
+                                                       to the queue set caused a higher priority task to\r
+                                                       unblock.  A context switch is required. */\r
+                                                       if( pxHigherPriorityTaskWoken != NULL )\r
+                                                       {\r
+                                                               *pxHigherPriorityTaskWoken = pdTRUE;\r
+                                                       }\r
+                                                       else\r
+                                                       {\r
+                                                               mtCOVERAGE_TEST_MARKER();\r
+                                                       }\r
+                                               }\r
+                                               else\r
+                                               {\r
+                                                       mtCOVERAGE_TEST_MARKER();\r
+                                               }\r
+                                       }\r
+                                       else\r
+                                       {\r
+                                               if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
+                                               {\r
+                                                       if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
+                                                       {\r
+                                                               /* The task waiting has a higher priority so\r
+                                                               record that a context switch is required. */\r
+                                                               if( pxHigherPriorityTaskWoken != NULL )\r
+                                                               {\r
+                                                                       *pxHigherPriorityTaskWoken = pdTRUE;\r
+                                                               }\r
+                                                               else\r
+                                                               {\r
+                                                                       mtCOVERAGE_TEST_MARKER();\r
+                                                               }\r
+                                                       }\r
+                                                       else\r
+                                                       {\r
+                                                               mtCOVERAGE_TEST_MARKER();\r
+                                                       }\r
+                                               }\r
+                                               else\r
+                                               {\r
+                                                       mtCOVERAGE_TEST_MARKER();\r
+                                               }\r
+                                       }\r
                                }\r
-                               else\r
+                               #else /* configUSE_QUEUE_SETS */\r
                                {\r
-                                       mtCOVERAGE_TEST_MARKER();\r
+                                       if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
+                                       {\r
+                                               if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
+                                               {\r
+                                                       /* The task waiting has a higher priority so record that a\r
+                                                       context switch is required. */\r
+                                                       if( pxHigherPriorityTaskWoken != NULL )\r
+                                                       {\r
+                                                               *pxHigherPriorityTaskWoken = pdTRUE;\r
+                                                       }\r
+                                                       else\r
+                                                       {\r
+                                                               mtCOVERAGE_TEST_MARKER();\r
+                                                       }\r
+                                               }\r
+                                               else\r
+                                               {\r
+                                                       mtCOVERAGE_TEST_MARKER();\r
+                                               }\r
+                                       }\r
+                                       else\r
+                                       {\r
+                                               mtCOVERAGE_TEST_MARKER();\r
+                                       }\r
                                }\r
+                               #endif /* configUSE_QUEUE_SETS */\r
                        }\r
+                       else\r
+                       {\r
+                               /* Increment the lock count so the task that unlocks the queue\r
+                               knows that data was posted while it was locked. */\r
+                               ++( pxQueue->xTxLock );\r
+                       }\r
+\r
+                       xReturn = pdPASS;\r
+               }\r
+               else\r
+               {\r
+                       traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
+                       xReturn = errQUEUE_FULL;\r
+               }\r
+       }\r
+       portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
+\r
+       return xReturn;\r
+}\r
+/*-----------------------------------------------------------*/\r
+\r
+BaseType_t xQueueGenericGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )\r
+{\r
+BaseType_t xReturn;\r
+UBaseType_t uxSavedInterruptStatus;\r
+Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
+\r
+       configASSERT( pxQueue );\r
+\r
+       /* xQueueGenericSendFromISR() should be used if the item size is not 0. */\r
+       configASSERT( pxQueue->uxItemSize == 0 );\r
+\r
+       /* RTOS ports that support interrupt nesting have the concept of a maximum\r
+       system call (or maximum API call) interrupt priority.  Interrupts that are\r
+       above the maximum system call priority are kept permanently enabled, even\r
+       when the RTOS kernel is in a critical section, but cannot make any calls to\r
+       FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
+       then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
+       failure if a FreeRTOS API function is called from an interrupt that has been\r
+       assigned a priority above the configured maximum system call priority.\r
+       Only FreeRTOS functions that end in FromISR can be called from interrupts\r
+       that have been assigned a priority at or (logically) below the maximum\r
+       system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
+       safe API to ensure interrupt entry is as fast and as simple as possible.\r
+       More information (albeit Cortex-M specific) is provided on the following\r
+       link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
+       portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
+\r
+       /* Similar to xQueueGenericSendFromISR() but used with semaphores where the\r
+       item size is 0.  Don't directly wake a task that was blocked on a queue\r
+       read, instead return a flag to say whether a context switch is required or\r
+       not (i.e. has a task with a higher priority than us been woken by this\r
+       post). */\r
+       uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
+       {\r
+               /* When the queue is used to implement a semaphore no data is ever\r
+               moved through the queue but it is still valid to see if the queue 'has\r
+               space'. */\r
+               if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
+               {\r
+                       traceQUEUE_SEND_FROM_ISR( pxQueue );\r
+\r
+                       /* A task can only have an inherited priority if it is a mutex\r
+                       holder - and if there is a mutex holder then the mutex cannot be\r
+                       given from an ISR.  Therefore, unlike the xQueueGenericGive()\r
+                       function, there is no need to determine the need for priority\r
+                       disinheritance here or to clear the mutex holder TCB member. */\r
+\r
+                       ++( pxQueue->uxMessagesWaiting );\r
 \r
                        /* The event list is not altered if the queue is locked.  This will\r
                        be done when the queue is unlocked later. */\r
@@ -1100,11 +1257,11 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                {\r
                                        if( pxQueue->pxQueueSetContainer != NULL )\r
                                        {\r
-                                               if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )\r
+                                               if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )\r
                                                {\r
-                                                       /* The queue is a member of a queue set, and posting\r
-                                                       to the queue set caused a higher priority task to\r
-                                                       unblock.  A context switch is required. */\r
+                                                       /* The semaphore is a member of a queue set, and\r
+                                                       posting to the queue set caused a higher priority\r
+                                                       task to unblock.  A context switch is required. */\r
                                                        if( pxHigherPriorityTaskWoken != NULL )\r
                                                        {\r
                                                                *pxHigherPriorityTaskWoken = pdTRUE;\r
@@ -1125,8 +1282,8 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                                {\r
                                                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
                                                        {\r
-                                                               /* The task waiting has a higher priority so record that a\r
-                                                               context switch is required. */\r
+                                                               /* The task waiting has a higher priority so\r
+                                                               record that a context switch is required. */\r
                                                                if( pxHigherPriorityTaskWoken != NULL )\r
                                                                {\r
                                                                        *pxHigherPriorityTaskWoken = pdTRUE;\r
@@ -1220,8 +1377,8 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
        {\r
                taskENTER_CRITICAL();\r
                {\r
-                       /* Is there data in the queue now?  To be running we must be\r
-                       the highest priority task wanting to access the queue. */\r
+                       /* Is there data in the queue now?  To be running the calling task\r
+                       must be the highest priority task wanting to access the queue. */\r
                        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
                        {\r
                                /* Remember the read position in case the queue is only being\r
@@ -1243,14 +1400,14 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                                {\r
                                                        /* Record the information required to implement\r
                                                        priority inheritance should it become necessary. */\r
-                                                       pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */\r
+                                                       pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */\r
                                                }\r
                                                else\r
                                                {\r
                                                        mtCOVERAGE_TEST_MARKER();\r
                                                }\r
                                        }\r
-                                       #endif\r
+                                       #endif /* configUSE_MUTEXES */\r
 \r
                                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
                                        {\r
@@ -1482,6 +1639,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
 \r
        configASSERT( pxQueue );\r
        configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
+       configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */\r
 \r
        /* RTOS ports that support interrupt nesting have the concept of a maximum\r
        system call (or maximum API call) interrupt priority.  Interrupts that are\r
@@ -1584,10 +1742,6 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                vQueueUnregisterQueue( pxQueue );\r
        }\r
        #endif\r
-       if( pxQueue->pcHead != NULL )\r
-       {\r
-               vPortFree( pxQueue->pcHead );\r
-       }\r
        vPortFree( pxQueue );\r
 }\r
 /*-----------------------------------------------------------*/\r
@@ -1633,7 +1787,6 @@ BaseType_t xReturn = pdFALSE;
                        if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
                        {\r
                                /* The mutex is no longer being held. */\r
-                               vTaskDecrementMutexHeldCount();\r
                                xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );\r
                                pxQueue->pxMutexHolder = NULL;\r
                        }\r
@@ -1699,7 +1852,7 @@ BaseType_t xReturn = pdFALSE;
 \r
 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )\r
 {\r
-       if( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX )\r
+       if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )\r
        {\r
                pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
                if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */\r
@@ -1712,11 +1865,6 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer
                }\r
                ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */\r
        }\r
-       else\r
-       {\r
-               /* A mutex was taken. */\r
-               vTaskIncrementMutexHeldCount();\r
-       }\r
 }\r
 /*-----------------------------------------------------------*/\r
 \r