FreeRTOS/Source/queue.c
/*
    FreeRTOS V9.0.0 - Copyright (C) 2016 Real Time Engineers Ltd.
    All rights reserved

    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

    This file is part of the FreeRTOS distribution.

    FreeRTOS is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License (version 2) as published by the
    Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.

    ***************************************************************************
    >>!   NOTE: The modification to the GPL is included to allow you to     !<<
    >>!   distribute a combined work that includes FreeRTOS without being   !<<
    >>!   obliged to provide the source code for proprietary components     !<<
    >>!   outside of the FreeRTOS kernel.                                   !<<
    ***************************************************************************

    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE.  Full license text is available on the following
    link: http://www.freertos.org/a00114.html

    ***************************************************************************
     *                                                                       *
     *    FreeRTOS provides completely free yet professionally developed,    *
     *    robust, strictly quality controlled, supported, and cross          *
     *    platform software that is more than just the market leader, it     *
     *    is the industry's de facto standard.                               *
     *                                                                       *
     *    Help yourself get started quickly while simultaneously helping     *
     *    to support the FreeRTOS project by purchasing a FreeRTOS           *
     *    tutorial book, reference manual, or both:                          *
     *    http://www.FreeRTOS.org/Documentation                              *
     *                                                                       *
    ***************************************************************************

    http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
    the FAQ page "My application does not run, what could be wrong?".  Have you
    defined configASSERT()?

    http://www.FreeRTOS.org/support - In return for receiving this top quality
    embedded software for free we request you assist our global community by
    participating in the support forum.

    http://www.FreeRTOS.org/training - Investing in training allows your team to
    be as productive as possible as early as possible.  Now you can receive
    FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
    Ltd, and the world's leading authority on the world's leading RTOS.

    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
    compatible FAT file system, and our tiny thread aware UDP/IP stack.

    http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
    Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.

    http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
    Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
    licenses offer ticketed support, indemnification and commercial middleware.

    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
    engineered and independently SIL3 certified version for use in safety and
    mission critical applications that require provable dependability.

    1 tab == 4 spaces!
*/

#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */


/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED                    ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED           ( ( int8_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex, the pcHead and pcTail pointers
are not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder                    pcTail
#define uxQueueType                      pcHead
#define queueQUEUE_IS_MUTEX              NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME       ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
    int8_t *pcHead;                 /*< Points to the beginning of the queue storage area. */
    int8_t *pcTail;                 /*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this is used as a marker. */
    int8_t *pcWriteTo;              /*< Points to the next free place in the storage area. */

    union                           /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
    {
        int8_t *pcReadFrom;         /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
        UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
    } u;

    List_t xTasksWaitingToSend;     /*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;  /*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
    UBaseType_t uxLength;           /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;         /*< The size of each item that the queue will hold. */

    volatile int8_t cRxLock;        /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile int8_t cTxLock;        /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
        uint8_t ucStaticallyAllocated;  /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition *pxQueueSetContainer;
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to
    the new QueueRegistryItem_t name below to enable the use of older kernel
    aware debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */

/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding items to or removing items from the queue, but
 * does prevent an ISR from removing tasks from the queue event lists.  If an
 * ISR finds a queue is locked it will instead increment the appropriate queue
 * lock count to indicate that a task may require unblocking.  When the queue
 * is unlocked these lock counts are inspected, and the appropriate action
 * taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
    /*
     * Checks to see if a queue is a member of a queue set, and if so, notifies
     * the queue set that the queue contains data.
     */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*
 * Called after a Queue_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;

/*
 * Mutexes are a special type of queue.  When a mutex is created, first the
 * queue is created, then prvInitialiseMutex() is called to configure the queue
 * as a mutex.
 */
#if( configUSE_MUTEXES == 1 )
    static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                             \
    taskENTER_CRITICAL();                                   \
    {                                                       \
        if( ( pxQueue )->cRxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
        if( ( pxQueue )->cTxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
    }                                                       \
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
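/* Editorial note (added commentary, not upstream documentation): within this
file every prvLockQueue() is balanced by a later prvUnlockQueue(), and the
pair is used while the scheduler is suspended (see xQueueGenericSend() below).
While the queue is locked, ISRs may still copy items in and out of the queue,
but they record that activity in cRxLock/cTxLock rather than touching the
event lists; prvUnlockQueue() then performs any deferred task unblocking. */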

BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
        pxQueue->cRxLock = queueUNLOCKED;
        pxQueue->cTxLock = queueUNLOCKED;

        if( xNewQueue == pdFALSE )
        {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                {
                    queueYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Ensure the event queues start in the correct state. */
            vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
/*-----------------------------------------------------------*/
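/* Usage sketch (added illustration, not part of the kernel source): an
application normally resets a queue through the xQueueReset() macro in
queue.h, which in this version expands to xQueueGenericReset( xQueue, pdFALSE ):

    if( xQueueReset( xQueue ) == pdPASS )
    {
        // The queue is now empty; one task blocked on a send, if any,
        // has been unblocked.
    }
*/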

#if( configSUPPORT_STATIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;

        configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

        /* The StaticQueue_t structure and the queue storage area must be
        supplied. */
        configASSERT( pxStaticQueue != NULL );

        /* A queue storage area should be provided if the item size is not 0, and
        should not be provided if the item size is 0. */
        configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) );
        configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) );

        #if( configASSERT_DEFINED == 1 )
        {
            /* Sanity check that the size of the structure used to declare a
            variable of type StaticQueue_t or StaticSemaphore_t equals the size of
            the real queue and semaphore structures. */
            volatile size_t xSize = sizeof( StaticQueue_t );
            configASSERT( xSize == sizeof( Queue_t ) );
        }
        #endif /* configASSERT_DEFINED */

        /* The address of a statically allocated queue was passed in, use it.
        The address of a statically allocated storage area was also passed in
        but is already set. */
        pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */

        if( pxNewQueue != NULL )
        {
            #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
            {
                /* Queues can be allocated either statically or dynamically, so
                note this queue was allocated statically in case the queue is
                later deleted. */
                pxNewQueue->ucStaticallyAllocated = pdTRUE;
            }
            #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }

        return pxNewQueue;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
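/* Usage sketch (added illustration; assumes the xQueueCreateStatic() macro
from queue.h, which passes queueQUEUE_TYPE_BASE as the queue type).  The
caller owns both buffers, so nothing needs to come from the heap:

    #define QUEUE_LENGTH    10
    #define ITEM_SIZE       sizeof( uint32_t )

    static StaticQueue_t xQueueBuffer;
    static uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ];

    QueueHandle_t xQueue = xQueueCreateStatic( QUEUE_LENGTH,
                                               ITEM_SIZE,
                                               ucQueueStorage,
                                               &xQueueBuffer );
*/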

#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;
    size_t xQueueSizeInBytes;
    uint8_t *pucQueueStorage;

        configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

        if( uxItemSize == ( UBaseType_t ) 0 )
        {
            /* There is not going to be a queue storage area. */
            xQueueSizeInBytes = ( size_t ) 0;
        }
        else
        {
            /* Allocate enough space to hold the maximum number of items that
            can be in the queue at any time. */
            xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
        }

        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

        if( pxNewQueue != NULL )
        {
            /* Jump past the queue structure to find the location of the queue
            storage area. */
            pucQueueStorage = ( ( uint8_t * ) pxNewQueue ) + sizeof( Queue_t );

            #if( configSUPPORT_STATIC_ALLOCATION == 1 )
            {
                /* Queues can be created either statically or dynamically, so
                note this queue was created dynamically in case it is later
                deleted. */
                pxNewQueue->ucStaticallyAllocated = pdFALSE;
            }
            #endif /* configSUPPORT_STATIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }

        return pxNewQueue;
    }

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
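/* Usage sketch (added illustration; assumes the xQueueCreate() macro from
queue.h, which passes queueQUEUE_TYPE_BASE as the queue type).  Note the
single pvPortMalloc() above allocates the Queue_t and its storage together:

    QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );

    if( xQueue == NULL )
    {
        // pvPortMalloc() could not provide memory for the queue.
    }
*/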

static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
{
    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* No RAM was allocated for the queue storage area, but pcHead cannot
        be set to NULL because NULL is used as a key to say the queue is used as
        a mutex.  Therefore just set pcHead to point to the queue as a benign
        value that is known to be within the memory map. */
        pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
    }
    else
    {
        /* Set the head to the start of the queue storage area. */
        pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
    }

    /* Initialise the queue members as described where the queue type is
    defined. */
    pxNewQueue->uxLength = uxQueueLength;
    pxNewQueue->uxItemSize = uxItemSize;
    ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

    #if ( configUSE_TRACE_FACILITY == 1 )
    {
        pxNewQueue->ucQueueType = ucQueueType;
    }
    #endif /* configUSE_TRACE_FACILITY */

    #if( configUSE_QUEUE_SETS == 1 )
    {
        pxNewQueue->pxQueueSetContainer = NULL;
    }
    #endif /* configUSE_QUEUE_SETS */

    traceQUEUE_CREATE( pxNewQueue );
}
/*-----------------------------------------------------------*/

#if( configUSE_MUTEXES == 1 )

    static void prvInitialiseMutex( Queue_t *pxNewQueue )
    {
        if( pxNewQueue != NULL )
        {
            /* The queue create function will set all the queue structure members
            correctly for a generic queue, but this function is creating a
            mutex.  Overwrite those members that need to be set differently -
            in particular the information required for priority inheritance. */
            pxNewQueue->pxMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* In case this is a recursive mutex. */
            pxNewQueue->u.uxRecursiveCallCount = 0;

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/

#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        pxNewQueue = ( Queue_t * ) xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
        prvInitialiseMutex( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
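/* Usage sketch (added illustration; applications normally reach
xQueueCreateMutex() through the xSemaphoreCreateMutex() macro in semphr.h):

    SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();

    if( xMutex != NULL )
    {
        if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 10 ) ) == pdTRUE )
        {
            // ...access the resource protected by the mutex...
            xSemaphoreGive( xMutex );
        }
    }
*/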

#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue )
    {
    Queue_t *pxNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        pxNewQueue = ( Queue_t * ) xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
        prvInitialiseMutex( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
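/* Usage sketch (added illustration; applications normally reach
xQueueCreateMutexStatic() through the xSemaphoreCreateMutexStatic() macro in
semphr.h, supplying a StaticSemaphore_t to hold the mutex's state):

    static StaticSemaphore_t xMutexBuffer;

    SemaphoreHandle_t xMutex = xSemaphoreCreateMutexStatic( &xMutexBuffer );
*/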

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
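/* Usage sketch (added illustration; applications call the
xSemaphoreGetMutexHolder() macro from semphr.h).  As the comment above notes,
the result is only reliable for asking "does the calling task hold the
mutex?":

    if( xSemaphoreGetMutexHolder( xMutex ) == xTaskGetCurrentTaskHandle() )
    {
        // The calling task holds the mutex.
    }
*/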

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        configASSERT( xSemaphore );

        /* Mutexes cannot be used in interrupt service routines, so the mutex
        holder should not change in an ISR, and therefore a critical section is
        not required here. */
        if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
        {
            pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
        }
        else
        {
            pxReturn = NULL;
        }

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then pxMutexHolder will not
        change outside of this task.  If this task does not hold the mutex then
        pxMutexHolder can never coincidentally equal the task's handle, and as
        this is the only condition we are interested in it does not matter if
        pxMutexHolder is accessed simultaneously by another task.  Therefore no
        mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
            the task handle, therefore no underflow check is required.  Also,
            uxRecursiveCallCount is only modified by the mutex holder, and as
            there can only be one, no mutual exclusion is required to modify the
            uxRecursiveCallCount member. */
            ( pxMutex->u.uxRecursiveCallCount )--;

            /* Has the recursive call count unwound to 0? */
            if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
            holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
        xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
        {
            ( pxMutex->u.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

            /* pdPASS will only be returned if the mutex was successfully
            obtained.  The calling task may have entered the Blocked state
            before reaching here. */
            if( xReturn != pdFAIL )
            {
                ( pxMutex->u.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
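/* Usage sketch (added illustration; recursive mutexes are normally used via
the semphr.h macros xSemaphoreCreateRecursiveMutex(), xSemaphoreTakeRecursive()
and xSemaphoreGiveRecursive()).  Every successful take must be balanced by a
give before the mutex becomes available to other tasks:

    SemaphoreHandle_t xRecMutex = xSemaphoreCreateRecursiveMutex();

    if( xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY ) == pdPASS )
    {
        // Taking again from the same task just bumps uxRecursiveCallCount.
        xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY );

        xSemaphoreGiveRecursive( xRecMutex );
        xSemaphoreGiveRecursive( xRecMutex ); // Now actually released.
    }
*/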

#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/

#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
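/* Usage sketch (added illustration; applications call the
xSemaphoreCreateCounting() macro from semphr.h).  A counting semaphore created
with an initial count of zero is a common way to count pending events:

    SemaphoreHandle_t xEvents = xSemaphoreCreateCounting( 10, 0 );

    // An event producer "gives" (the count rises towards uxMaxCount)...
    xSemaphoreGive( xEvents );

    // ...and the consumer "takes" (the count falls, blocking at zero).
    xSemaphoreTake( xEvents, portMAX_DELAY );
*/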

BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
            highest priority task wanting to access the queue.  If the head item
            in the queue is to be overwritten then it does not matter if the
            queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );
                xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock. A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    }
}
/*-----------------------------------------------------------*/
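/* Usage sketch (added illustration; queue.h maps the common send functions
onto xQueueGenericSend() with different xCopyPosition arguments:
xQueueSend()/xQueueSendToBack() use queueSEND_TO_BACK, xQueueSendToFront()
uses queueSEND_TO_FRONT, and xQueueOverwrite() uses queueOVERWRITE, the last
being valid only on queues of length one):

    uint32_t ulValue = 42;

    if( xQueueSend( xQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) != pdPASS )
    {
        // The queue stayed full for the whole 100ms block time.
    }
*/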

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
    in the queue.  Also don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
            semaphore or mutex.  That means prvCopyDataToQueue() cannot result
            in a task disinheriting a priority and prvCopyDataToQueue() can be
            called here even though the disinherit function does not check if
            the scheduler is suspended before accessing the ready lists. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
1058                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1059                                                         }\r
1060                                                         else\r
1061                                                         {\r
1062                                                                 mtCOVERAGE_TEST_MARKER();\r
1063                                                         }\r
1064                                                 }\r
1065                                                 else\r
1066                                                 {\r
1067                                                         mtCOVERAGE_TEST_MARKER();\r
1068                                                 }\r
1069                                         }\r
1070                                         else\r
1071                                         {\r
1072                                                 mtCOVERAGE_TEST_MARKER();\r
1073                                         }\r
1074                                 }\r
1075                                 #endif /* configUSE_QUEUE_SETS */\r
1076                         }\r
1077                         else\r
1078                         {\r
1079                                 /* Increment the lock count so the task that unlocks the queue\r
1080                                 knows that data was posted while it was locked. */\r
1081                                 pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );\r
1082                         }\r
1083 \r
1084                         xReturn = pdPASS;\r
1085                 }\r
1086                 else\r
1087                 {\r
1088                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1089                         xReturn = errQUEUE_FULL;\r
1090                 }\r
1091         }\r
1092         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1093 \r
1094         return xReturn;\r
1095 }\r
1096 /*-----------------------------------------------------------*/\r
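/* Illustrative sketch only - not part of the original file.  A minimal ISR
that posts a byte with the xQueueSendFromISR() macro (which wraps
xQueueGenericSendFromISR() with queueSEND_TO_BACK) and then requests a
context switch if the post woke a higher priority task.  The queue handle,
ISR name and data value are hypothetical, and portYIELD_FROM_ISR() is port
specific. */
#if 0
extern QueueHandle_t xRxQueue;

void vExampleRxISR( void )
{
char cByteReceived = 0x55; /* A real driver would read this from hardware. */
BaseType_t xHigherPriorityTaskWoken = pdFALSE;

	/* An ISR cannot block, so if the queue is full the byte is simply
	dropped and errQUEUE_FULL is returned. */
	( void ) xQueueSendFromISR( xRxQueue, &cByteReceived, &xHigherPriorityTaskWoken );

	/* Request a context switch before exiting if a higher priority task
	was unblocked by the post. */
	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
#endif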
1097 \r
1098 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )\r
1099 {\r
1100 BaseType_t xReturn;\r
1101 UBaseType_t uxSavedInterruptStatus;\r
1102 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1103 \r
1104         /* Similar to xQueueGenericSendFromISR() but used with semaphores where the\r
1105         item size is 0.  Don't directly wake a task that was blocked on a queue\r
1106         read; instead return a flag to say whether a context switch is required\r
1107         (i.e. has a task with a higher priority than us been woken by this\r
1108         post). */\r
1109 \r
1110         configASSERT( pxQueue );\r
1111 \r
1112         /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()\r
1113         if the item size is not 0. */\r
1114         configASSERT( pxQueue->uxItemSize == 0 );\r
1115 \r
1116         /* Normally a mutex would not be given from an interrupt, especially if\r
1117         there is a mutex holder, as priority inheritance makes no sense for an\r
1118         interrupt, only for tasks. */\r
1119         configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );\r
1120 \r
1121         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1122         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1123         above the maximum system call priority are kept permanently enabled, even\r
1124         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1125         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1126         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1127         failure if a FreeRTOS API function is called from an interrupt that has been\r
1128         assigned a priority above the configured maximum system call priority.\r
1129         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1130         that have been assigned a priority at or (logically) below the maximum\r
1131         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1132         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1133         More information (albeit Cortex-M specific) is provided on the following\r
1134         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1135         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1136 \r
1137         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1138         {\r
1139                 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
1140 \r
1141                 /* When the queue is used to implement a semaphore no data is ever\r
1142                 moved through the queue but it is still valid to see if the queue 'has\r
1143                 space'. */\r
1144                 if( uxMessagesWaiting < pxQueue->uxLength )\r
1145                 {\r
1146                         const int8_t cTxLock = pxQueue->cTxLock;\r
1147 \r
1148                         traceQUEUE_SEND_FROM_ISR( pxQueue );\r
1149 \r
1150                         /* A task can only have an inherited priority if it is a mutex\r
1151                         holder - and if there is a mutex holder then the mutex cannot be\r
1152                         given from an ISR.  As this is the ISR version of the function it\r
1153                         can be assumed there is no mutex holder and no need to determine if\r
1154                         priority disinheritance is needed.  Simply increase the count of\r
1155                         messages (semaphores) available. */\r
1156                         pxQueue->uxMessagesWaiting = uxMessagesWaiting + 1;\r
1157 \r
1158                         /* The event list is not altered if the queue is locked.  This will\r
1159                         be done when the queue is unlocked later. */\r
1160                         if( cTxLock == queueUNLOCKED )\r
1161                         {\r
1162                                 #if ( configUSE_QUEUE_SETS == 1 )\r
1163                                 {\r
1164                                         if( pxQueue->pxQueueSetContainer != NULL )\r
1165                                         {\r
1166                                                 if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )\r
1167                                                 {\r
1168                                                         /* The semaphore is a member of a queue set, and\r
1169                                                         posting to the queue set caused a higher priority\r
1170                                                         task to unblock.  A context switch is required. */\r
1171                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1172                                                         {\r
1173                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1174                                                         }\r
1175                                                         else\r
1176                                                         {\r
1177                                                                 mtCOVERAGE_TEST_MARKER();\r
1178                                                         }\r
1179                                                 }\r
1180                                                 else\r
1181                                                 {\r
1182                                                         mtCOVERAGE_TEST_MARKER();\r
1183                                                 }\r
1184                                         }\r
1185                                         else\r
1186                                         {\r
1187                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1188                                                 {\r
1189                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1190                                                         {\r
1191                                                                 /* The task waiting has a higher priority so\r
1192                                                                 record that a context switch is required. */\r
1193                                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1194                                                                 {\r
1195                                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1196                                                                 }\r
1197                                                                 else\r
1198                                                                 {\r
1199                                                                         mtCOVERAGE_TEST_MARKER();\r
1200                                                                 }\r
1201                                                         }\r
1202                                                         else\r
1203                                                         {\r
1204                                                                 mtCOVERAGE_TEST_MARKER();\r
1205                                                         }\r
1206                                                 }\r
1207                                                 else\r
1208                                                 {\r
1209                                                         mtCOVERAGE_TEST_MARKER();\r
1210                                                 }\r
1211                                         }\r
1212                                 }\r
1213                                 #else /* configUSE_QUEUE_SETS */\r
1214                                 {\r
1215                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1216                                         {\r
1217                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1218                                                 {\r
1219                                                         /* The task waiting has a higher priority so record that a\r
1220                                                         context switch is required. */\r
1221                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1222                                                         {\r
1223                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1224                                                         }\r
1225                                                         else\r
1226                                                         {\r
1227                                                                 mtCOVERAGE_TEST_MARKER();\r
1228                                                         }\r
1229                                                 }\r
1230                                                 else\r
1231                                                 {\r
1232                                                         mtCOVERAGE_TEST_MARKER();\r
1233                                                 }\r
1234                                         }\r
1235                                         else\r
1236                                         {\r
1237                                                 mtCOVERAGE_TEST_MARKER();\r
1238                                         }\r
1239                                 }\r
1240                                 #endif /* configUSE_QUEUE_SETS */\r
1241                         }\r
1242                         else\r
1243                         {\r
1244                                 /* Increment the lock count so the task that unlocks the queue\r
1245                                 knows that data was posted while it was locked. */\r
1246                                 pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );\r
1247                         }\r
1248 \r
1249                         xReturn = pdPASS;\r
1250                 }\r
1251                 else\r
1252                 {\r
1253                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1254                         xReturn = errQUEUE_FULL;\r
1255                 }\r
1256         }\r
1257         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1258 \r
1259         return xReturn;\r
1260 }\r
1261 /*-----------------------------------------------------------*/\r
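/* Illustrative sketch only.  Giving a binary semaphore from an ISR - the
xSemaphoreGiveFromISR() macro in semphr.h maps onto xQueueGiveFromISR()
because a semaphore is just a queue with an item size of zero.  The
semaphore handle and ISR name are hypothetical, and semphr.h must be
included to use the macro. */
#if 0
extern SemaphoreHandle_t xBinarySemaphore;

void vExampleTimerISR( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;

	/* Unblock the task, if any, that is waiting on the semaphore. */
	( void ) xSemaphoreGiveFromISR( xBinarySemaphore, &xHigherPriorityTaskWoken );

	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
#endif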
1262 \r
1263 BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )\r
1264 {\r
1265 BaseType_t xEntryTimeSet = pdFALSE;\r
1266 TimeOut_t xTimeOut;\r
1267 int8_t *pcOriginalReadPosition;\r
1268 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1269 \r
1270         configASSERT( pxQueue );\r
1271         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1272         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )\r
1273         {\r
1274                 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );\r
1275         }\r
1276         #endif\r
1277 \r
1278         /* This function relaxes the coding standard somewhat to allow return\r
1279         statements within the function itself.  This is done in the interest\r
1280         of execution time efficiency. */\r
1281 \r
1282         for( ;; )\r
1283         {\r
1284                 taskENTER_CRITICAL();\r
1285                 {\r
1286                         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
1287 \r
1288                         /* Is there data in the queue now?  To be running the calling task\r
1289                         must be the highest priority task wanting to access the queue. */\r
1290                         if( uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1291                         {\r
1292                                 /* Remember the read position in case the queue is only being\r
1293                                 peeked. */\r
1294                                 pcOriginalReadPosition = pxQueue->u.pcReadFrom;\r
1295 \r
1296                                 prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1297 \r
1298                                 if( xJustPeeking == pdFALSE )\r
1299                                 {\r
1300                                         traceQUEUE_RECEIVE( pxQueue );\r
1301 \r
1302                                         /* Actually removing data, not just peeking. */\r
1303                                         pxQueue->uxMessagesWaiting = uxMessagesWaiting - 1;\r
1304 \r
1305                                         #if ( configUSE_MUTEXES == 1 )\r
1306                                         {\r
1307                                                 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1308                                                 {\r
1309                                                         /* Record the information required to implement\r
1310                                                         priority inheritance should it become necessary. */\r
1311                                                         pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */\r
1312                                                 }\r
1313                                                 else\r
1314                                                 {\r
1315                                                         mtCOVERAGE_TEST_MARKER();\r
1316                                                 }\r
1317                                         }\r
1318                                         #endif /* configUSE_MUTEXES */\r
1319 \r
1320                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1321                                         {\r
1322                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1323                                                 {\r
1324                                                         queueYIELD_IF_USING_PREEMPTION();\r
1325                                                 }\r
1326                                                 else\r
1327                                                 {\r
1328                                                         mtCOVERAGE_TEST_MARKER();\r
1329                                                 }\r
1330                                         }\r
1331                                         else\r
1332                                         {\r
1333                                                 mtCOVERAGE_TEST_MARKER();\r
1334                                         }\r
1335                                 }\r
1336                                 else\r
1337                                 {\r
1338                                         traceQUEUE_PEEK( pxQueue );\r
1339 \r
1340                                         /* The data is not being removed, so reset the read\r
1341                                         pointer. */\r
1342                                         pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
1343 \r
1344                                         /* The data is being left in the queue, so see if there are\r
1345                                         any other tasks waiting for the data. */\r
1346                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1347                                         {\r
1348                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1349                                                 {\r
1350                                                         /* The task waiting has a higher priority than this task. */\r
1351                                                         queueYIELD_IF_USING_PREEMPTION();\r
1352                                                 }\r
1353                                                 else\r
1354                                                 {\r
1355                                                         mtCOVERAGE_TEST_MARKER();\r
1356                                                 }\r
1357                                         }\r
1358                                         else\r
1359                                         {\r
1360                                                 mtCOVERAGE_TEST_MARKER();\r
1361                                         }\r
1362                                 }\r
1363 \r
1364                                 taskEXIT_CRITICAL();\r
1365                                 return pdPASS;\r
1366                         }\r
1367                         else\r
1368                         {\r
1369                                 if( xTicksToWait == ( TickType_t ) 0 )\r
1370                                 {\r
1371                                         /* The queue was empty and no block time is specified (or\r
1372                                         the block time has expired) so leave now. */\r
1373                                         taskEXIT_CRITICAL();\r
1374                                         traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1375                                         return errQUEUE_EMPTY;\r
1376                                 }\r
1377                                 else if( xEntryTimeSet == pdFALSE )\r
1378                                 {\r
1379                                         /* The queue was empty and a block time was specified so\r
1380                                         configure the timeout structure. */\r
1381                                         vTaskSetTimeOutState( &xTimeOut );\r
1382                                         xEntryTimeSet = pdTRUE;\r
1383                                 }\r
1384                                 else\r
1385                                 {\r
1386                                         /* Entry time was already set. */\r
1387                                         mtCOVERAGE_TEST_MARKER();\r
1388                                 }\r
1389                         }\r
1390                 }\r
1391                 taskEXIT_CRITICAL();\r
1392 \r
1393                 /* Interrupts and other tasks can send to and receive from the queue\r
1394                 now that the critical section has been exited. */\r
1395 \r
1396                 vTaskSuspendAll();\r
1397                 prvLockQueue( pxQueue );\r
1398 \r
1399                 /* Update the timeout state to see if it has expired yet. */\r
1400                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )\r
1401                 {\r
1402                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1403                         {\r
1404                                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );\r
1405 \r
1406                                 #if ( configUSE_MUTEXES == 1 )\r
1407                                 {\r
1408                                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1409                                         {\r
1410                                                 taskENTER_CRITICAL();\r
1411                                                 {\r
1412                                                         vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );\r
1413                                                 }\r
1414                                                 taskEXIT_CRITICAL();\r
1415                                         }\r
1416                                         else\r
1417                                         {\r
1418                                                 mtCOVERAGE_TEST_MARKER();\r
1419                                         }\r
1420                                 }\r
1421                                 #endif\r
1422 \r
1423                                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );\r
1424                                 prvUnlockQueue( pxQueue );\r
1425                                 if( xTaskResumeAll() == pdFALSE )\r
1426                                 {\r
1427                                         portYIELD_WITHIN_API();\r
1428                                 }\r
1429                                 else\r
1430                                 {\r
1431                                         mtCOVERAGE_TEST_MARKER();\r
1432                                 }\r
1433                         }\r
1434                         else\r
1435                         {\r
1436                                 /* Try again. */\r
1437                                 prvUnlockQueue( pxQueue );\r
1438                                 ( void ) xTaskResumeAll();\r
1439                         }\r
1440                 }\r
1441                 else\r
1442                 {\r
1443                         prvUnlockQueue( pxQueue );\r
1444                         ( void ) xTaskResumeAll();\r
1445 \r
1446                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1447                         {\r
1448                                 traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1449                                 return errQUEUE_EMPTY;\r
1450                         }\r
1451                         else\r
1452                         {\r
1453                                 mtCOVERAGE_TEST_MARKER();\r
1454                         }\r
1455                 }\r
1456         }\r
1457 }\r
1458 /*-----------------------------------------------------------*/\r
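/* Illustrative sketch only.  A task blocking on a queue through the
xQueueReceive() macro, which calls xQueueGenericReceive() with xJustPeeking
set to pdFALSE; xQueuePeek() makes the same call with pdTRUE so the item is
left on the queue.  The queue handle and task are hypothetical. */
#if 0
extern QueueHandle_t xDataQueue;

void vExampleConsumerTask( void *pvParameters )
{
uint32_t ulReceivedValue;

	( void ) pvParameters;

	for( ;; )
	{
		/* Block for up to 100 ticks waiting for an item to arrive. */
		if( xQueueReceive( xDataQueue, &ulReceivedValue, ( TickType_t ) 100 ) == pdPASS )
		{
			/* ulReceivedValue now holds a copy of the received item. */
		}
		else
		{
			/* The call timed out and returned errQUEUE_EMPTY. */
		}
	}
}
#endif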
1459 \r
1460 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )\r
1461 {\r
1462 BaseType_t xReturn;\r
1463 UBaseType_t uxSavedInterruptStatus;\r
1464 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1465 \r
1466         configASSERT( pxQueue );\r
1467         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1468 \r
1469         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1470         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1471         above the maximum system call priority are kept permanently enabled, even\r
1472         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1473         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1474         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1475         failure if a FreeRTOS API function is called from an interrupt that has been\r
1476         assigned a priority above the configured maximum system call priority.\r
1477         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1478         that have been assigned a priority at or (logically) below the maximum\r
1479         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1480         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1481         More information (albeit Cortex-M specific) is provided on the following\r
1482         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1483         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1484 \r
1485         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1486         {\r
1487                 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
1488 \r
1489                 /* Cannot block in an ISR, so check there is data available. */\r
1490                 if( uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1491                 {\r
1492                         const int8_t cRxLock = pxQueue->cRxLock;\r
1493 \r
1494                         traceQUEUE_RECEIVE_FROM_ISR( pxQueue );\r
1495 \r
1496                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1497                         pxQueue->uxMessagesWaiting = uxMessagesWaiting - 1;\r
1498 \r
1499                         /* If the queue is locked the event list will not be modified.\r
1500                         Instead update the lock count so the task that unlocks the queue\r
1501                         will know that an ISR has removed data while the queue was\r
1502                         locked. */\r
1503                         if( cRxLock == queueUNLOCKED )\r
1504                         {\r
1505                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1506                                 {\r
1507                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1508                                         {\r
1509                                                 /* The task waiting has a higher priority than us so\r
1510                                                 force a context switch. */\r
1511                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1512                                                 {\r
1513                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1514                                                 }\r
1515                                                 else\r
1516                                                 {\r
1517                                                         mtCOVERAGE_TEST_MARKER();\r
1518                                                 }\r
1519                                         }\r
1520                                         else\r
1521                                         {\r
1522                                                 mtCOVERAGE_TEST_MARKER();\r
1523                                         }\r
1524                                 }\r
1525                                 else\r
1526                                 {\r
1527                                         mtCOVERAGE_TEST_MARKER();\r
1528                                 }\r
1529                         }\r
1530                         else\r
1531                         {\r
1532                                 /* Increment the lock count so the task that unlocks the queue\r
1533                                 knows that data was removed while it was locked. */\r
1534                                 pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );\r
1535                         }\r
1536 \r
1537                         xReturn = pdPASS;\r
1538                 }\r
1539                 else\r
1540                 {\r
1541                         xReturn = pdFAIL;\r
1542                         traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );\r
1543                 }\r
1544         }\r
1545         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1546 \r
1547         return xReturn;\r
1548 }\r
1549 /*-----------------------------------------------------------*/\r
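/* Illustrative sketch only.  Draining a queue from an ISR - the call
cannot block, so it is simply repeated until pdFAIL is returned.  The queue
handle, ISR name and peripheral write are hypothetical. */
#if 0
extern QueueHandle_t xTxQueue;

void vExampleTxISR( void )
{
char cByteToSend;
BaseType_t xHigherPriorityTaskWoken = pdFALSE;

	while( xQueueReceiveFromISR( xTxQueue, &cByteToSend, &xHigherPriorityTaskWoken ) == pdPASS )
	{
		/* Write cByteToSend to the (hypothetical) peripheral here. */
	}

	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
#endif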
1550 \r
1551 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )\r
1552 {\r
1553 BaseType_t xReturn;\r
1554 UBaseType_t uxSavedInterruptStatus;\r
1555 int8_t *pcOriginalReadPosition;\r
1556 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1557 \r
1558         configASSERT( pxQueue );\r
1559         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1560         configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */\r
1561 \r
1562         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1563         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1564         above the maximum system call priority are kept permanently enabled, even\r
1565         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1566         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1567         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1568         failure if a FreeRTOS API function is called from an interrupt that has been\r
1569         assigned a priority above the configured maximum system call priority.\r
1570         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1571         that have been assigned a priority at or (logically) below the maximum\r
1572         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1573         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1574         More information (albeit Cortex-M specific) is provided on the following\r
1575         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1576         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1577 \r
1578         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1579         {\r
1580                 /* Cannot block in an ISR, so check there is data available. */\r
1581                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1582                 {\r
1583                         traceQUEUE_PEEK_FROM_ISR( pxQueue );\r
1584 \r
1585                         /* Remember the read position so it can be reset as nothing is\r
1586                         actually being removed from the queue. */\r
1587                         pcOriginalReadPosition = pxQueue->u.pcReadFrom;\r
1588                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1589                         pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
1590 \r
1591                         xReturn = pdPASS;\r
1592                 }\r
1593                 else\r
1594                 {\r
1595                         xReturn = pdFAIL;\r
1596                         traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );\r
1597                 }\r
1598         }\r
1599         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1600 \r
1601         return xReturn;\r
1602 }\r
1603 /*-----------------------------------------------------------*/\r
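/* Illustrative sketch only.  A peek from an ISR copies the head item but
leaves it on the queue, which is why, unlike the other FromISR functions,
there is no pxHigherPriorityTaskWoken parameter - a peek can never unblock
anything.  The queue handle and ISR name are hypothetical. */
#if 0
extern QueueHandle_t xDataQueue;

void vExampleInspectISR( void )
{
uint32_t ulNextValue;

	if( xQueuePeekFromISR( xDataQueue, &ulNextValue ) == pdPASS )
	{
		/* ulNextValue is a copy of the head item; the queue is unchanged. */
	}
}
#endif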
1604 \r
1605 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )\r
1606 {\r
1607 UBaseType_t uxReturn;\r
1608 \r
1609         configASSERT( xQueue );\r
1610 \r
1611         taskENTER_CRITICAL();\r
1612         {\r
1613                 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1614         }\r
1615         taskEXIT_CRITICAL();\r
1616 \r
1617         return uxReturn;\r
1618 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1619 /*-----------------------------------------------------------*/\r
1620 \r
1621 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )\r
1622 {\r
1623 UBaseType_t uxReturn;\r
1624 Queue_t *pxQueue;\r
1625 \r
1626         pxQueue = ( Queue_t * ) xQueue;\r
1627         configASSERT( pxQueue );\r
1628 \r
1629         taskENTER_CRITICAL();\r
1630         {\r
1631                 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;\r
1632         }\r
1633         taskEXIT_CRITICAL();\r
1634 \r
1635         return uxReturn;\r
1636 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1637 /*-----------------------------------------------------------*/\r
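/* Illustrative sketch only.  The two query functions partition the queue,
so uxQueueMessagesWaiting() + uxQueueSpacesAvailable() equals the uxLength
the queue was created with - although each value is read in its own
critical section, so the pair is only a snapshot.  The queue handle is
hypothetical. */
#if 0
extern QueueHandle_t xDataQueue;

void vExampleOccupancyCheck( void )
{
UBaseType_t uxUsed, uxFree;

	uxUsed = uxQueueMessagesWaiting( xDataQueue );
	uxFree = uxQueueSpacesAvailable( xDataQueue );

	/* uxUsed + uxFree equals the queue length, provided no other task or
	interrupt used the queue between the two calls. */
}
#endif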
1638 \r
1639 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )\r
1640 {\r
1641 UBaseType_t uxReturn;\r
1642 \r
1643         configASSERT( xQueue );\r
1644 \r
1645         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1646 \r
1647         return uxReturn;\r
1648 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1649 /*-----------------------------------------------------------*/\r
1650 \r
1651 void vQueueDelete( QueueHandle_t xQueue )\r
1652 {\r
1653 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1654 \r
1655         configASSERT( pxQueue );\r
1656         traceQUEUE_DELETE( pxQueue );\r
1657 \r
1658         #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
1659         {\r
1660                 vQueueUnregisterQueue( pxQueue );\r
1661         }\r
1662         #endif\r
1663 \r
1664         #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )\r
1665         {\r
1666                 /* The queue can only have been allocated dynamically - free it\r
1667                 again. */\r
1668                 vPortFree( pxQueue );\r
1669         }\r
1670         #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )\r
1671         {\r
1672                 /* The queue could have been allocated statically or dynamically, so\r
1673                 check before attempting to free the memory. */\r
1674                 if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )\r
1675                 {\r
1676                         vPortFree( pxQueue );\r
1677                 }\r
1678                 else\r
1679                 {\r
1680                         mtCOVERAGE_TEST_MARKER();\r
1681                 }\r
1682         }\r
1683         #else\r
1684         {\r
1685                 /* The queue must have been statically allocated, so is not going to be\r
1686                 deleted.  Avoid compiler warnings about the unused parameter. */\r
1687                 ( void ) pxQueue;\r
1688         }\r
1689         #endif /* configSUPPORT_DYNAMIC_ALLOCATION */\r
1690 }\r
1691 /*-----------------------------------------------------------*/\r
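/* Illustrative sketch only.  A create/delete lifetime under
configSUPPORT_DYNAMIC_ALLOCATION - vQueueDelete() only calls vPortFree()
when the control block came from the FreeRTOS heap, so deleting a
statically allocated queue releases no memory. */
#if 0
void vExampleQueueLifetime( void )
{
QueueHandle_t xTempQueue;

	/* A queue of ten uint32_t items allocated from the FreeRTOS heap. */
	xTempQueue = xQueueCreate( 10, sizeof( uint32_t ) );

	if( xTempQueue != NULL )
	{
		/* ... use the queue ... */

		vQueueDelete( xTempQueue );
	}
}
#endif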
1692 \r
1693 #if ( configUSE_TRACE_FACILITY == 1 )\r
1694 \r
1695         UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )\r
1696         {\r
1697                 return ( ( Queue_t * ) xQueue )->uxQueueNumber;\r
1698         }\r
1699 \r
1700 #endif /* configUSE_TRACE_FACILITY */\r
1701 /*-----------------------------------------------------------*/\r
1702 \r
1703 #if ( configUSE_TRACE_FACILITY == 1 )\r
1704 \r
1705         void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )\r
1706         {\r
1707                 ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;\r
1708         }\r
1709 \r
1710 #endif /* configUSE_TRACE_FACILITY */\r
1711 /*-----------------------------------------------------------*/\r
1712 \r
1713 #if ( configUSE_TRACE_FACILITY == 1 )\r
1714 \r
1715         uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )\r
1716         {\r
1717                 return ( ( Queue_t * ) xQueue )->ucQueueType;\r
1718         }\r
1719 \r
1720 #endif /* configUSE_TRACE_FACILITY */\r
1721 /*-----------------------------------------------------------*/\r
1722 \r
1723 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )\r
1724 {\r
1725 BaseType_t xReturn = pdFALSE;\r
1726 UBaseType_t uxMessagesWaiting;\r
1727 \r
1728         /* This function is called from a critical section. */\r
1729 \r
1730         uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
1731 \r
1732         if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )\r
1733         {\r
1734                 #if ( configUSE_MUTEXES == 1 )\r
1735                 {\r
1736                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1737                         {\r
1738                                 /* The mutex is no longer being held. */\r
1739                                 xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );\r
1740                                 pxQueue->pxMutexHolder = NULL;\r
1741                         }\r
1742                         else\r
1743                         {\r
1744                                 mtCOVERAGE_TEST_MARKER();\r
1745                         }\r
1746                 }\r
1747                 #endif /* configUSE_MUTEXES */\r
1748         }\r
1749         else if( xPosition == queueSEND_TO_BACK )\r
1750         {\r
1751                 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */\r
1752                 pxQueue->pcWriteTo += pxQueue->uxItemSize;\r
1753                 if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */\r
1754                 {\r
1755                         pxQueue->pcWriteTo = pxQueue->pcHead;\r
1756                 }\r
1757                 else\r
1758                 {\r
1759                         mtCOVERAGE_TEST_MARKER();\r
1760                 }\r
1761         }\r
1762         else\r
1763         {\r
1764                 ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
1765                 pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;\r
1766                 if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */\r
1767                 {\r
1768                         pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );\r
1769                 }\r
1770                 else\r
1771                 {\r
1772                         mtCOVERAGE_TEST_MARKER();\r
1773                 }\r
1774 \r
1775                 if( xPosition == queueOVERWRITE )\r
1776                 {\r
1777                         if( uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1778                         {\r
1779                                 /* An item is not being added but overwritten, so subtract\r
1780                                 one from the recorded number of items in the queue so that,\r
1781                                 when one is added again below, the recorded number of items\r
1782                                 remains correct. */\r
1783                                 --uxMessagesWaiting;\r
1784                         }\r
1785                         else\r
1786                         {\r
1787                                 mtCOVERAGE_TEST_MARKER();\r
1788                         }\r
1789                 }\r
1790                 else\r
1791                 {\r
1792                         mtCOVERAGE_TEST_MARKER();\r
1793                 }\r
1794         }\r
1795 \r
1796         pxQueue->uxMessagesWaiting = uxMessagesWaiting + 1;\r
1797 \r
1798         return xReturn;\r
1799 }\r
1800 /*-----------------------------------------------------------*/\r
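/* Illustrative sketch only.  The queueOVERWRITE branch above is what the
xQueueOverwrite() macro relies on: the new item is copied over the slot at
the current read position, and uxMessagesWaiting is decremented before
being re-incremented, so on a length-one queue the count stays at one, the
slot always holds the latest value, and the call never fails.  The mailbox
handle is hypothetical. */
#if 0
extern QueueHandle_t xMailbox; /* Created with a length of one. */

void vExampleMailboxWrite( uint32_t ulLatestValue )
{
	/* Replaces any value already in the mailbox; always returns pdPASS. */
	( void ) xQueueOverwrite( xMailbox, &ulLatestValue );
}
#endif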
1801 \r
1802 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )\r
1803 {\r
1804         if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )\r
1805         {\r
1806                 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
1807                 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */\r
1808                 {\r
1809                         pxQueue->u.pcReadFrom = pxQueue->pcHead;\r
1810                 }\r
1811                 else\r
1812                 {\r
1813                         mtCOVERAGE_TEST_MARKER();\r
1814                 }\r
1815                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */\r
1816         }\r
1817 }\r
1818 /*-----------------------------------------------------------*/\r
1819 \r
1820 static void prvUnlockQueue( Queue_t * const pxQueue )\r
1821 {\r
1822         /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */\r
1823 \r
1824         /* The lock counts contain the number of extra data items placed or\r
1825         removed from the queue while the queue was locked.  When a queue is\r
1826         locked items can be added or removed, but the event lists cannot be\r
1827         updated. */\r
1828         taskENTER_CRITICAL();\r
1829         {\r
1830                 int8_t cTxLock = pxQueue->cTxLock;\r
1831 \r
1832                 /* See if data was added to the queue while it was locked. */\r
1833                 while( cTxLock > queueLOCKED_UNMODIFIED )\r
1834                 {\r
1835                         /* Data was posted while the queue was locked.  Are any tasks\r
1836                         blocked waiting for data to become available? */\r
1837                         #if ( configUSE_QUEUE_SETS == 1 )\r
1838                         {\r
1839                                 if( pxQueue->pxQueueSetContainer != NULL )\r
1840                                 {\r
1841                                         if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )\r
1842                                         {\r
1843                                                 /* The queue is a member of a queue set, and posting to\r
1844                                                 the queue set caused a higher priority task to unblock.\r
1845                                                 A context switch is required. */\r
1846                                                 vTaskMissedYield();\r
1847                                         }\r
1848                                         else\r
1849                                         {\r
1850                                                 mtCOVERAGE_TEST_MARKER();\r
1851                                         }\r
1852                                 }\r
1853                                 else\r
1854                                 {\r
1855                                         /* Tasks that are removed from the event list will get\r
1856                                         added to the pending ready list as the scheduler is still\r
1857                                         suspended. */\r
1858                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1859                                         {\r
1860                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1861                                                 {\r
1862                                                         /* The task waiting has a higher priority so record that a\r
1863                                                         context switch is required. */\r
1864                                                         vTaskMissedYield();\r
1865                                                 }\r
1866                                                 else\r
1867                                                 {\r
1868                                                         mtCOVERAGE_TEST_MARKER();\r
1869                                                 }\r
1870                                         }\r
1871                                         else\r
1872                                         {\r
1873                                                 break;\r
1874                                         }\r
1875                                 }\r
1876                         }\r
1877                         #else /* configUSE_QUEUE_SETS */\r
1878                         {\r
1879                                 /* Tasks that are removed from the event list will get added to\r
1880                                 the pending ready list as the scheduler is still suspended. */\r
1881                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1882                                 {\r
1883                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1884                                         {\r
1885                                                 /* The task waiting has a higher priority so record that\r
1886                                                 a context switch is required. */\r
1887                                                 vTaskMissedYield();\r
1888                                         }\r
1889                                         else\r
1890                                         {\r
1891                                                 mtCOVERAGE_TEST_MARKER();\r
1892                                         }\r
1893                                 }\r
1894                                 else\r
1895                                 {\r
1896                                         break;\r
1897                                 }\r
1898                         }\r
1899                         #endif /* configUSE_QUEUE_SETS */\r
1900 \r
1901                         --cTxLock;\r
1902                 }\r
1903 \r
1904                 pxQueue->cTxLock = queueUNLOCKED;\r
1905         }\r
1906         taskEXIT_CRITICAL();\r
1907 \r
1908         /* Do the same for the Rx lock. */\r
1909         taskENTER_CRITICAL();\r
1910         {\r
1911                 int8_t cRxLock = pxQueue->cRxLock;\r
1912 \r
1913                 while( cRxLock > queueLOCKED_UNMODIFIED )\r
1914                 {\r
1915                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1916                         {\r
1917                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1918                                 {\r
1919                                         vTaskMissedYield();\r
1920                                 }\r
1921                                 else\r
1922                                 {\r
1923                                         mtCOVERAGE_TEST_MARKER();\r
1924                                 }\r
1925 \r
1926                                 --cRxLock;\r
1927                         }\r
1928                         else\r
1929                         {\r
1930                                 break;\r
1931                         }\r
1932                 }\r
1933 \r
1934                 pxQueue->cRxLock = queueUNLOCKED;\r
1935         }\r
1936         taskEXIT_CRITICAL();\r
1937 }\r
1938 /*-----------------------------------------------------------*/\r
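/* Illustrative sketch only.  The pattern in which prvUnlockQueue() is used
by the blocking API functions in this file (see xQueueGenericReceive()
above): the scheduler is suspended and the queue locked before deciding
whether to block, so interrupts can still post to or receive from the
queue, with the deferred event list updates replayed from the
cTxLock/cRxLock counts when the queue is unlocked. */
#if 0
static void prvExampleLockingPattern( Queue_t * const pxQueue )
{
	vTaskSuspendAll();
	prvLockQueue( pxQueue );

	/* ... inspect the queue and decide whether to block ... */

	prvUnlockQueue( pxQueue );
	( void ) xTaskResumeAll();
}
#endif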
1939 \r
1940 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )\r
1941 {\r
1942 BaseType_t xReturn;\r
1943 \r
1944         taskENTER_CRITICAL();\r
1945         {\r
1946                 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
1947                 {\r
1948                         xReturn = pdTRUE;\r
1949                 }\r
1950                 else\r
1951                 {\r
1952                         xReturn = pdFALSE;\r
1953                 }\r
1954         }\r
1955         taskEXIT_CRITICAL();\r
1956 \r
1957         return xReturn;\r
1958 }\r
1959 /*-----------------------------------------------------------*/\r
1960 \r
1961 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )\r
1962 {\r
1963 BaseType_t xReturn;\r
1964 \r
1965         configASSERT( xQueue );\r
1966         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
1967         {\r
1968                 xReturn = pdTRUE;\r
1969         }\r
1970         else\r
1971         {\r
1972                 xReturn = pdFALSE;\r
1973         }\r
1974 \r
1975         return xReturn;\r
1976 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
1977 /*-----------------------------------------------------------*/\r
1978 \r
1979 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )\r
1980 {\r
1981 BaseType_t xReturn;\r
1982 \r
1983         taskENTER_CRITICAL();\r
1984         {\r
1985                 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )\r
1986                 {\r
1987                         xReturn = pdTRUE;\r
1988                 }\r
1989                 else\r
1990                 {\r
1991                         xReturn = pdFALSE;\r
1992                 }\r
1993         }\r
1994         taskEXIT_CRITICAL();\r
1995 \r
1996         return xReturn;\r
1997 }\r
1998 /*-----------------------------------------------------------*/\r
1999 \r
2000 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )\r
2001 {\r
2002 BaseType_t xReturn;\r
2003 \r
2004         configASSERT( xQueue );\r
2005         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )\r
2006         {\r
2007                 xReturn = pdTRUE;\r
2008         }\r
2009         else\r
2010         {\r
2011                 xReturn = pdFALSE;\r
2012         }\r
2013 \r
2014         return xReturn;\r
2015 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2016 /*-----------------------------------------------------------*/\r
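/* Illustrative sketch only.  The FromISR query functions read the message
count directly rather than from inside a critical section, so the result is
only a snapshot; here it gates an optional post.  The queue handle, ISR
name and sample value are hypothetical. */
#if 0
extern QueueHandle_t xDataQueue;

void vExampleSamplerISR( void )
{
uint32_t ulSample = 0; /* A real ISR would read a fresh sample here. */
BaseType_t xHigherPriorityTaskWoken = pdFALSE;

	if( xQueueIsQueueFullFromISR( xDataQueue ) == pdFALSE )
	{
		( void ) xQueueSendFromISR( xDataQueue, &ulSample, &xHigherPriorityTaskWoken );
	}

	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
#endif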
2017 \r
2018 #if ( configUSE_CO_ROUTINES == 1 )\r
2019 \r
2020         BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )\r
2021         {\r
2022         BaseType_t xReturn;\r
2023         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2024 \r
2025                 /* If the queue is already full we may have to block.  A critical section\r
2026                 is required to prevent an interrupt removing something from the queue\r
2027                 between the check to see if the queue is full and blocking on the queue. */\r
2028                 portDISABLE_INTERRUPTS();\r
2029                 {\r
2030                         if( prvIsQueueFull( pxQueue ) != pdFALSE )\r
2031                         {\r
2032                                 /* The queue is full - do we want to block or just leave without\r
2033                                 posting? */\r
2034                                 if( xTicksToWait > ( TickType_t ) 0 )\r
2035                                 {\r
2036                                         /* As this is called from a co-routine we cannot block directly, but\r
2037                                         return indicating that we need to block. */\r
2038                                         vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );\r
2039                                         portENABLE_INTERRUPTS();\r
2040                                         return errQUEUE_BLOCKED;\r
2041                                 }\r
2042                                 else\r
2043                                 {\r
2044                                         portENABLE_INTERRUPTS();\r
2045                                         return errQUEUE_FULL;\r
2046                                 }\r
2047                         }\r
2048                 }\r
2049                 portENABLE_INTERRUPTS();\r
2050 \r
2051                 portDISABLE_INTERRUPTS();\r
2052                 {\r
2053                         if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
2054                         {\r
2055                                 /* There is room in the queue, copy the data into the queue. */\r
2056                                 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );\r
2057                                 xReturn = pdPASS;\r
2058 \r
2059                                 /* Were any co-routines waiting for data to become available? */\r
2060                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2061                                 {\r
2062                                         /* In this instance the co-routine could be placed directly\r
2063                                         into the ready list as we are within a critical section.\r
2064                                         Instead the same pending ready list mechanism is used as if\r
2065                                         the event were caused from within an interrupt. */\r
2066                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2067                                         {\r
2068                                                 /* The co-routine waiting has a higher priority so record\r
2069                                                 that a yield might be appropriate. */\r
2070                                                 xReturn = errQUEUE_YIELD;\r
2071                                         }\r
2072                                         else\r
2073                                         {\r
2074                                                 mtCOVERAGE_TEST_MARKER();\r
2075                                         }\r
2076                                 }\r
2077                                 else\r
2078                                 {\r
2079                                         mtCOVERAGE_TEST_MARKER();\r
2080                                 }\r
2081                         }\r
2082                         else\r
2083                         {\r
2084                                 xReturn = errQUEUE_FULL;\r
2085                         }\r
2086                 }\r
2087                 portENABLE_INTERRUPTS();\r
2088 \r
2089                 return xReturn;\r
2090         }\r
2091 \r
2092 #endif /* configUSE_CO_ROUTINES */\r
2093 /*-----------------------------------------------------------*/\r
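
/*
 * Example use of the co-routine send function (an illustrative sketch only,
 * not part of the kernel).  Application code should post through the
 * crQUEUE_SEND() macro, which wraps xQueueCRSend() and performs the block or
 * yield that its return value requests.  The co-routine and queue handle
 * below are hypothetical.
 *
 *	void vSenderCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
 *	{
 *	// Variables in co-routines must be declared static, as the stack is not
 *	// preserved across blocking points.
 *	static BaseType_t xResult;
 *	static UBaseType_t uxValueToPost = 0;
 *
 *		crSTART( xHandle ); // Co-routines must start with a call to crSTART().
 *
 *		for( ;; )
 *		{
 *			// Post onto the hypothetical queue, blocking for up to ten ticks
 *			// if the queue is full.
 *			crQUEUE_SEND( xHandle, xCoRoutineQueue, &uxValueToPost, 10, &xResult );
 *
 *			if( xResult == pdPASS )
 *			{
 *				uxValueToPost++;
 *			}
 *		}
 *
 *		crEND(); // Co-routines must end with a call to crEND().
 *	}
 */
/*-----------------------------------------------------------*/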

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_EMPTY;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
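
/*
 * Example use of the co-routine receive function (an illustrative sketch
 * only, not part of the kernel).  Application code should read through the
 * crQUEUE_RECEIVE() macro, which wraps xQueueCRReceive().  The co-routine,
 * queue handle and handler function below are hypothetical.
 *
 *	void vReceiverCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
 *	{
 *	// Co-routine variables must be static so their values survive blocking.
 *	static BaseType_t xResult;
 *	static UBaseType_t uxReceivedValue;
 *
 *		crSTART( xHandle );
 *
 *		for( ;; )
 *		{
 *			// Wait for up to 100 ticks for data to arrive on the queue.
 *			crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &uxReceivedValue, 100, &xResult );
 *
 *			if( xResult == pdPASS )
 *			{
 *				vProcessValue( uxReceivedValue ); // Hypothetical handler.
 *			}
 *		}
 *
 *		crEND();
 *	}
 */
/*-----------------------------------------------------------*/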

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
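
/*
 * Example use of the co-routine ISR send function (an illustrative sketch
 * only, not part of the kernel).  Application ISRs should post through the
 * crQUEUE_SEND_FROM_ISR() macro, which wraps xQueueCRSendFromISR().  The
 * woken flag is threaded through successive posts so that only one co-routine
 * is woken per interrupt.  The handler, queue handle and device functions
 * below are hypothetical.
 *
 *	void vRxInterruptHandler( void )
 *	{
 *	BaseType_t xCoRoutineWoken = pdFALSE;
 *	char cRxByte;
 *
 *		while( xDataAvailable() != pdFALSE ) // Hypothetical status query.
 *		{
 *			cRxByte = cReadByte(); // Hypothetical device read.
 *			xCoRoutineWoken = crQUEUE_SEND_FROM_ISR( xCoRoutineQueue, &cRxByte, xCoRoutineWoken );
 *		}
 *	}
 */
/*-----------------------------------------------------------*/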

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available. If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
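
/*
 * Example use of the co-routine ISR receive function (an illustrative sketch
 * only, not part of the kernel).  Application ISRs should read through the
 * crQUEUE_RECEIVE_FROM_ISR() macro, which wraps xQueueCRReceiveFromISR().
 * The handler, queue handle and device write below are hypothetical.
 *
 *	void vTxInterruptHandler( void )
 *	{
 *	BaseType_t xCoRoutineWoken = pdFALSE;
 *	char cTxByte;
 *
 *		// Drain characters queued by a co-routine into the transmit register.
 *		while( crQUEUE_RECEIVE_FROM_ISR( xCoRoutineQueue, &cTxByte, &xCoRoutineWoken ) == pdPASS )
 *		{
 *			vWriteByte( cTxByte ); // Hypothetical device write.
 *		}
 *	}
 */
/*-----------------------------------------------------------*/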

#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
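
/*
 * Example use of the queue registry (an illustrative sketch only, not part
 * of the kernel).  Registering a queue makes it visible by name to RTOS
 * aware debuggers.  Only a pointer to the name is stored, so the string must
 * remain valid for as long as the registry entry exists.  The queue length
 * and item type below are arbitrary.
 *
 *	void vCreateAndRegisterQueue( void )
 *	{
 *	QueueHandle_t xRxQueue;
 *
 *		xRxQueue = xQueueCreate( 10, sizeof( uint32_t ) );
 *
 *		if( xRxQueue != NULL )
 *		{
 *			vQueueAddToRegistry( xRxQueue, "RxQueue" );
 *		}
 *	}
 */
/*-----------------------------------------------------------*/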

#if ( configQUEUE_REGISTRY_SIZE > 0 )

	const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;
	const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

		/* Note there is nothing here to protect against another task adding or
		removing entries from the registry while it is being searched. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				pcReturn = xQueueRegistry[ ux ].pcQueueName;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

		return pcReturn;
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered is actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot is free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;

				/* Set the handle to NULL to ensure the same queue handle cannot
				appear in the registry twice if it is added, removed, then
				added again. */
				xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
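
/*
 * Example of querying and removing a registry entry (an illustrative sketch
 * only, not part of the kernel).  A queue should be unregistered before it
 * is deleted, otherwise a stale handle remains in the registry.
 *
 *	void vDeleteRegisteredQueue( QueueHandle_t xQueue )
 *	{
 *		// pcQueueGetName() returns NULL if the queue is not in the registry.
 *		if( pcQueueGetName( xQueue ) != NULL )
 *		{
 *			vQueueUnregisterQueue( xQueue );
 *		}
 *
 *		vQueueDelete( xQueue );
 *	}
 */
/*-----------------------------------------------------------*/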

#if ( configUSE_TIMERS == 1 )

	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/

#if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
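
/*
 * Example of creating a queue set (an illustrative sketch only, not part of
 * the kernel).  A queue set holds one event for every item held by its
 * members, so its length must be at least the sum of the lengths of the
 * queues and the counts of the semaphores added to it.  The file scope
 * handles below are hypothetical and are also used by the selection sketch
 * that follows xQueueSelectFromSet().
 *
 *	#define QUEUE_LENGTH		5
 *	#define SEMAPHORE_COUNT		1 // A binary semaphore effectively has a length of one.
 *
 *	static QueueSetHandle_t xQueueSet;
 *	static QueueHandle_t xQueue;
 *	static SemaphoreHandle_t xSemaphore;
 *
 *	void vCreateQueueSet( void )
 *	{
 *		xQueueSet = xQueueCreateSet( QUEUE_LENGTH + SEMAPHORE_COUNT );
 *
 *		xQueue = xQueueCreate( QUEUE_LENGTH, sizeof( uint32_t ) );
 *		xSemaphore = xSemaphoreCreateBinary();
 *
 *		// Members must be empty at the point they are added to the set.
 *		( void ) xQueueAddToSet( xQueue, xQueueSet );
 *		( void ) xQueueAddToSet( xSemaphore, xQueueSet );
 *	}
 */
/*-----------------------------------------------------------*/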

#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xReturn = pdFAIL;
			}
			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xReturn = pdFAIL;
			}
			else
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
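
/*
 * Example of removing a queue from a set (an illustrative sketch only, not
 * part of the kernel).  As the function above rejects non-empty members, the
 * result must be checked.
 *
 *	void vDetachQueueFromSet( QueueHandle_t xQueue, QueueSetHandle_t xQueueSet )
 *	{
 *		// Fails if the queue still holds items or is not a member of the set.
 *		if( xQueueRemoveFromSet( xQueue, xQueueSet ) != pdPASS )
 *		{
 *			// Handle the error - the queue must first be emptied, by reading
 *			// it via the set, before it can be removed.
 *		}
 *	}
 */
/*-----------------------------------------------------------*/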

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
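
/*
 * Example use of xQueueSelectFromSet() (an illustrative sketch only, not
 * part of the kernel).  The handles are assumed to have been created and
 * added to the set as sketched after xQueueCreateSet() above.  A member
 * should only be read after its handle has been returned from the set, and
 * then with no block time, otherwise the events held in the set and the data
 * held in the members can become inconsistent.
 *
 *	void vEventServicingTask( void *pvParameters )
 *	{
 *	QueueSetMemberHandle_t xActivatedMember;
 *	uint32_t ulReceived;
 *
 *		for( ;; )
 *		{
 *			// Block until one of the members of the set has data available.
 *			xActivatedMember = xQueueSelectFromSet( xQueueSet, portMAX_DELAY );
 *
 *			if( xActivatedMember == ( QueueSetMemberHandle_t ) xQueue )
 *			{
 *				( void ) xQueueReceive( xQueue, &ulReceived, 0 );
 *			}
 *			else if( xActivatedMember == ( QueueSetMemberHandle_t ) xSemaphore )
 *			{
 *				( void ) xSemaphoreTake( xSemaphore, 0 );
 *			}
 *		}
 *	}
 */
/*-----------------------------------------------------------*/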

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
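
/*
 * Example use of xQueueSelectFromSetFromISR() (an illustrative sketch only,
 * not part of the kernel).  No block time can be specified from an ISR, so
 * NULL is returned when no member of the set is ready.  The handler below is
 * hypothetical and reuses the handles from the earlier queue set sketches.
 *
 *	void vAnInterruptHandler( void )
 *	{
 *	QueueSetMemberHandle_t xActivatedMember;
 *	uint32_t ulReceived;
 *	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *		xActivatedMember = xQueueSelectFromSetFromISR( xQueueSet );
 *
 *		if( xActivatedMember == ( QueueSetMemberHandle_t ) xQueue )
 *		{
 *			( void ) xQueueReceiveFromISR( xQueue, &ulReceived, &xHigherPriorityTaskWoken );
 *		}
 *
 *		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *	}
 */
/*-----------------------------------------------------------*/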

#if ( configUSE_QUEUE_SETS == 1 )

	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called from a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			const int8_t cTxLock = pxQueueSetContainer->cTxLock;

			traceQUEUE_SEND( pxQueueSetContainer );

			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( cTxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority. */
						xReturn = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */