/*
    FreeRTOS V8.2.3 - Copyright (C) 2015 Real Time Engineers Ltd.
    All rights reserved

    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

    This file is part of the FreeRTOS distribution.

    FreeRTOS is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License (version 2) as published by the
    Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.

    ***************************************************************************
    >>!   NOTE: The modification to the GPL is included to allow you to     !<<
    >>!   distribute a combined work that includes FreeRTOS without being   !<<
    >>!   obliged to provide the source code for proprietary components     !<<
    >>!   outside of the FreeRTOS kernel.                                   !<<
    ***************************************************************************

    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE.  Full license text is available on the following
    link: http://www.freertos.org/a00114.html

    ***************************************************************************
     *                                                                       *
     *    FreeRTOS provides completely free yet professionally developed,    *
     *    robust, strictly quality controlled, supported, and cross          *
     *    platform software that is more than just the market leader, it     *
     *    is the industry's de facto standard.                               *
     *                                                                       *
     *    Help yourself get started quickly while simultaneously helping     *
     *    to support the FreeRTOS project by purchasing a FreeRTOS           *
     *    tutorial book, reference manual, or both:                          *
     *    http://www.FreeRTOS.org/Documentation                              *
     *                                                                       *
    ***************************************************************************

    http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
    the FAQ page "My application does not run, what could be wrong?".  Have you
    defined configASSERT()?

    http://www.FreeRTOS.org/support - In return for receiving this top quality
    embedded software for free we request you assist our global community by
    participating in the support forum.

    http://www.FreeRTOS.org/training - Investing in training allows your team to
    be as productive as possible as early as possible.  Now you can receive
    FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
    Ltd, and the world's leading authority on the world's leading RTOS.

    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
    compatible FAT file system, and our tiny thread aware UDP/IP stack.

    http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
    Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.

    http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
    Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
    licenses offer ticketed support, indemnification and commercial middleware.

    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
    engineered and independently SIL3 certified version for use in safety and
    mission critical applications that require provable dependability.

    1 tab == 4 spaces!
*/

#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
	#include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */


/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED					( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( TickType_t ) 0U )

/* Bits that can be set in xQUEUE->ucStaticAllocationFlags to indicate that the
queue storage area and queue structure were statically allocated respectively.
When these are statically allocated they won't be freed if the queue gets
deleted. */
#define queueSTATICALLY_ALLOCATED_STORAGE		( ( uint8_t ) 0x01 )
#define queueSTATICALLY_ALLOCATED_QUEUE_STRUCT	( ( uint8_t ) 0x02 )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
	int8_t *pcHead;					/*< Points to the beginning of the queue storage area. */
	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this is used as a marker. */
	int8_t *pcWriteTo;				/*< Points to the next free place in the storage area. */

	union							/* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
	{
		int8_t *pcReadFrom;			/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
		UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
	} u;

	List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
	List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

	volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
	UBaseType_t uxItemSize;			/*< The size of each item that the queue will hold. */

	volatile BaseType_t xRxLock;	/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
	volatile BaseType_t xTxLock;	/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

	#if ( configUSE_QUEUE_SETS == 1 )
		struct QueueDefinition *pxQueueSetContainer;
	#endif

	#if ( configUSE_TRACE_FACILITY == 1 )
		UBaseType_t uxQueueNumber;
		uint8_t ucQueueType;
	#endif

	#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
		uint8_t ucStaticAllocationFlags;
	#endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/
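
/* Illustrative note (an editorial sketch, not part of the original source):
for a queue created with uxLength = 4 and uxItemSize = sizeof( uint32_t ),
the storage area is 4 * 4 = 16 bytes.  pcHead points to its first byte,
pcTail points one item past the last (pcHead + 16), and pcWriteTo and
u.pcReadFrom advance through the area in uxItemSize steps, wrapping back to
pcHead when they reach pcTail. */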

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} xQueueRegistryItem;
	/* The old xQueueRegistryItem name is maintained above then typedefed to the
	new QueueRegistryItem_t name below to enable the use of older kernel aware
	debuggers. */
	typedef xQueueRegistryItem QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */

/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

/*
 * A queue requires two blocks of memory; a structure to hold the queue state
 * and a storage area to hold the items in the queue.  The memory is assigned
 * by prvAllocateQueueMemory().  If ppucQueueStorage is NULL then the queue
 * storage will be allocated dynamically, otherwise the buffer passed in
 * ppucQueueStorage will be used.  If pxStaticQueue is NULL then the queue
 * structure will be allocated dynamically, otherwise the buffer pointed to by
 * pxStaticQueue will be used.
 */
static Queue_t *prvAllocateQueueMemory( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t **ppucQueueStorage, StaticQueue_t *pxStaticQueue );

#if ( configUSE_QUEUE_SETS == 1 )
	/*
	 * Checks to see if a queue is a member of a queue set, and if so, notifies
	 * the queue set that the queue contains data.
	 */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL();									\
	{														\
		if( ( pxQueue )->xRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->xTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
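
/* Illustrative sketch (an editorial addition, not part of the original
source): prvLockQueue() is always paired with prvUnlockQueue() around a
scheduler suspension, as in the pattern used by xQueueGenericSend() below:

	vTaskSuspendAll();
	prvLockQueue( pxQueue );
	// ... check the queue state and perhaps block on it ...
	prvUnlockQueue( pxQueue );
	( void ) xTaskResumeAll();

While the queue is locked an ISR can still add or remove items, but records
the fact in the xRxLock/xTxLock counts instead of touching the event lists. */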

BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
		pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
		pxQueue->pcWriteTo = pxQueue->pcHead;
		pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
		pxQueue->xRxLock = queueUNLOCKED;
		pxQueue->xTxLock = queueUNLOCKED;

		if( xNewQueue == pdFALSE )
		{
			/* If there are tasks blocked waiting to read from the queue, then
			the tasks will remain blocked as after this function exits the queue
			will still be empty.  If there are tasks blocked waiting to write to
			the queue, then one should be unblocked as after this function exits
			it will be possible to write to it. */
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
				{
					queueYIELD_IF_USING_PREEMPTION();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			/* Ensure the event queues start in the correct state. */
			vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
		}
	}
	taskEXIT_CRITICAL();

	/* A value is returned for calling semantic consistency with previous
	versions. */
	return pdPASS;
}
/*-----------------------------------------------------------*/
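
/* Example usage (an editorial sketch, not part of the original source;
assumes the xQueueReset() convenience macro from queue.h, which calls
xQueueGenericReset() with xNewQueue set to pdFALSE):

	// Discard everything held in an existing queue.  One task blocked
	// waiting to send, if any, is unblocked so it can complete its send.
	( void ) xQueueReset( xQueue );
*/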

static Queue_t *prvAllocateQueueMemory( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t **ppucQueueStorage, StaticQueue_t *pxStaticQueue )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;

	configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

	#if( ( configASSERT_DEFINED == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
	{
		/* Sanity check that the size of the structure used to declare a
		variable of type StaticQueue_t or StaticSemaphore_t equals the size of
		the real queue and semaphore structures. */
		volatile size_t xSize = sizeof( StaticQueue_t );
		configASSERT( xSize == sizeof( Queue_t ) );
	}
	#endif /* configASSERT_DEFINED */

	if( uxItemSize == ( UBaseType_t ) 0 )
	{
		/* There is not going to be a queue storage area. */
		xQueueSizeInBytes = ( size_t ) 0;
	}
	else
	{
		/* Allocate enough space to hold the maximum number of items that can be
		in the queue at any time. */
		xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
	}

	#if( configSUPPORT_STATIC_ALLOCATION == 0 )
	{
		/* Allocate the new queue structure and storage area. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

		if( pxNewQueue != NULL )
		{
			/* Jump past the queue structure to find the location of the queue
			storage area. */
			*ppucQueueStorage = ( ( uint8_t * ) pxNewQueue ) + sizeof( Queue_t );
		}

		/* The pxStaticQueue parameter is not used.  Remove compiler warnings. */
		( void ) pxStaticQueue;
	}
	#else
	{
		if( pxStaticQueue == NULL )
		{
			/* A statically allocated queue was not passed in, so create one
			dynamically. */
			pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
			pxNewQueue->ucStaticAllocationFlags = 0;
		}
		else
		{
			/* The address of a statically allocated queue was passed in, use
			it and note that the queue was not dynamically allocated so there is
			no attempt to free it again should the queue be deleted. */
			pxNewQueue = ( Queue_t * ) pxStaticQueue;
			pxNewQueue->ucStaticAllocationFlags = queueSTATICALLY_ALLOCATED_QUEUE_STRUCT;
		}

		if( pxNewQueue != NULL )
		{
			if( ( *ppucQueueStorage == NULL ) && ( xQueueSizeInBytes > 0 ) )
			{
				/* A statically allocated queue storage area was not passed in,
				so allocate the queue storage area dynamically. */
				*ppucQueueStorage = ( uint8_t * ) pvPortMalloc( xQueueSizeInBytes );

				if( *ppucQueueStorage == NULL )
				{
					/* The queue storage area could not be created, so free the
					queue structure also. */
					if( ( pxNewQueue->ucStaticAllocationFlags & queueSTATICALLY_ALLOCATED_QUEUE_STRUCT ) == 0 )
					{
						vPortFree( ( void * ) pxNewQueue );
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					pxNewQueue = NULL;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Note the fact that either the queue storage area was passed
				into this function, or the size requirement for the queue
				storage area was zero - either way no attempt should be made to
				free the queue storage area if the queue is deleted. */
				pxNewQueue->ucStaticAllocationFlags |= queueSTATICALLY_ALLOCATED_STORAGE;
			}
		}
	}
	#endif

	return pxNewQueue;
}
/*-----------------------------------------------------------*/
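
/* Illustrative note (an editorial addition, not part of the original
source): when configSUPPORT_STATIC_ALLOCATION is 1 the four parameter
combinations behave as follows:

	*ppucQueueStorage == NULL, pxStaticQueue == NULL : both blocks malloc'd
	*ppucQueueStorage != NULL, pxStaticQueue == NULL : only the structure malloc'd
	*ppucQueueStorage == NULL, pxStaticQueue != NULL : only the storage malloc'd
	*ppucQueueStorage != NULL, pxStaticQueue != NULL : nothing malloc'd

ucStaticAllocationFlags records which blocks were supplied by the
application, so the queue delete path knows which blocks it may free. */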

QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;

	/* Remove compiler warnings about unused parameters should
	configUSE_TRACE_FACILITY not be set to 1. */
	( void ) ucQueueType;

	/* A queue requires a queue structure and a queue storage area.  These may
	be allocated statically or dynamically, depending on the parameter
	values. */
	pxNewQueue = prvAllocateQueueMemory( uxQueueLength, uxItemSize, &pucQueueStorage, pxStaticQueue );

	if( pxNewQueue != NULL )
	{
		if( uxItemSize == ( UBaseType_t ) 0 )
		{
			/* No RAM was allocated for the queue storage area, but pcHead
			cannot be set to NULL because NULL is used as a key to say the queue
			is used as a mutex.  Therefore just set pcHead to point to the queue
			as a benign value that is known to be within the memory map. */
			pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
		}
		else
		{
			/* Set the head to the start of the queue storage area. */
			pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
		}

		/* Initialise the queue members as described where the queue type is
		defined. */
		pxNewQueue->uxLength = uxQueueLength;
		pxNewQueue->uxItemSize = uxItemSize;
		( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

		#if ( configUSE_TRACE_FACILITY == 1 )
		{
			pxNewQueue->ucQueueType = ucQueueType;
		}
		#endif /* configUSE_TRACE_FACILITY */

		#if( configUSE_QUEUE_SETS == 1 )
		{
			pxNewQueue->pxQueueSetContainer = NULL;
		}
		#endif /* configUSE_QUEUE_SETS */

		traceQUEUE_CREATE( pxNewQueue );
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}

	configASSERT( pxNewQueue );

	return ( QueueHandle_t ) pxNewQueue;
}
/*-----------------------------------------------------------*/
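
/* Example usage (an editorial sketch, not part of the original source;
queueQUEUE_TYPE_BASE is the ordinary queue type defined in queue.h):

	// Fully dynamic: the structure and storage area are both allocated
	// with pvPortMalloc().
	QueueHandle_t xDynamic = xQueueGenericCreate( 10, sizeof( uint32_t ), NULL, NULL, queueQUEUE_TYPE_BASE );

	// Fully static (requires configSUPPORT_STATIC_ALLOCATION == 1): the
	// application supplies both blocks of memory, so nothing is malloc'd.
	static StaticQueue_t xQueueStruct;
	static uint8_t ucStorage[ 10 * sizeof( uint32_t ) ];
	QueueHandle_t xStatic = xQueueGenericCreate( 10, sizeof( uint32_t ), ucStorage, &xQueueStruct, queueQUEUE_TYPE_BASE );
*/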

#if ( configUSE_MUTEXES == 1 )

	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		/* Allocate the new queue structure. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
		if( pxNewQueue != NULL )
		{
			/* Information required for priority inheritance. */
			pxNewQueue->pxMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* When a queue is used as a mutex no data is actually copied
			into or out of the queue. */
			pxNewQueue->pcWriteTo = NULL;
			pxNewQueue->u.pcReadFrom = NULL;

			/* Each mutex has a length of 1 (like a binary semaphore) and
			an item size of 0 as nothing is actually copied into or out
			of the mutex. */
			pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
			pxNewQueue->uxLength = ( UBaseType_t ) 1U;
			pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
			pxNewQueue->xRxLock = queueUNLOCKED;
			pxNewQueue->xTxLock = queueUNLOCKED;

			#if ( configUSE_TRACE_FACILITY == 1 )
			{
				pxNewQueue->ucQueueType = ucQueueType;
			}
			#endif

			#if ( configUSE_QUEUE_SETS == 1 )
			{
				pxNewQueue->pxQueueSetContainer = NULL;
			}
			#endif

			/* Ensure the event queues start with the correct state. */
			vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state. */
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			traceCREATE_MUTEX_FAILED();
		}

		return pxNewQueue;
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
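
/* Example usage (an editorial sketch, not part of the original source;
assumes the xSemaphoreCreateMutex(), xSemaphoreTake() and xSemaphoreGive()
macros from semphr.h, which wrap the queue functions in this file):

	SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();

	if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 100 ) ) == pdPASS )
	{
		// ... access the resource protected by the mutex ...
		( void ) xSemaphoreGive( xMutex );
	}
*/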

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
	{
	void *pxReturn;

		/* This function is called by xSemaphoreGetMutexHolder(), and should not
		be called directly.  Note:  This is a good way of determining if the
		calling task is the mutex holder, but not a good way of determining the
		identity of the mutex holder, as the holder may change between the
		following critical section exiting and the function returning. */
		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
			}
			else
			{
				pxReturn = NULL;
			}
		}
		taskEXIT_CRITICAL();

		return pxReturn;
	} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
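
/* Example usage (an editorial sketch, not part of the original source;
assumes the xSemaphoreGetMutexHolder() macro from semphr.h).  As the comment
above notes, the result is only dependable when asking whether the calling
task itself holds the mutex:

	if( xSemaphoreGetMutexHolder( xMutex ) == ( void * ) xTaskGetCurrentTaskHandle() )
	{
		// The calling task is the mutex holder.
	}
*/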

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then pxMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		pxMutexHolder can never coincidentally equal the task's handle, and as
		this is the only condition we are interested in it does not matter if
		pxMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the pxMutexHolder variable. */
		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.uxRecursiveCallCount )--;

			/* Have we unwound the call count? */
			if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			/* The mutex cannot be given because the calling task is not the
			holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* Comments regarding mutual exclusion as per those within
		xQueueGiveMutexRecursive(). */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
		{
			( pxMutex->u.uxRecursiveCallCount )++;
			xReturn = pdPASS;
		}
		else
		{
			xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

			/* pdPASS will only be returned if the mutex was successfully
			obtained.  The calling task may have entered the Blocked state
			before reaching here. */
			if( xReturn == pdPASS )
			{
				( pxMutex->u.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
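
/* Example usage (an editorial sketch, not part of the original source;
assumes the xSemaphoreCreateRecursiveMutex(), xSemaphoreTakeRecursive() and
xSemaphoreGiveRecursive() macros from semphr.h):

	SemaphoreHandle_t xRecMutex = xSemaphoreCreateRecursiveMutex();

	if( xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY ) == pdPASS )
	{
		// The holder may take the same mutex again; uxRecursiveCallCount
		// tracks the nesting depth.
		( void ) xSemaphoreTakeRecursive( xRecMutex, 0 );
		( void ) xSemaphoreGiveRecursive( xRecMutex );

		// The mutex only becomes available to other tasks once every take
		// has been balanced by a give.
		( void ) xSemaphoreGiveRecursive( xRecMutex );
	}
*/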

#if ( configUSE_COUNTING_SEMAPHORES == 1 )

	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, NULL, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		configASSERT( xHandle );
		return xHandle;
	}

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/
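
/* Example usage (an editorial sketch, not part of the original source;
assumes the xSemaphoreTake() and xSemaphoreGive() macros from semphr.h).  A
counting semaphore guarding five interchangeable resources, all initially
available:

	SemaphoreHandle_t xCounting = xQueueCreateCountingSemaphore( 5, 5 );

	if( xSemaphoreTake( xCounting, portMAX_DELAY ) == pdPASS )
	{
		// ... use one of the five resources ...
		( void ) xSemaphoreGive( xCounting );
	}
*/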

BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif


	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there room on the queue now?  The running task must be the
			highest priority task wanting to access the queue.  If the head item
			in the queue is to be overwritten then it does not matter if the
			queue is full. */
			if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
			{
				traceQUEUE_SEND( pxQueue );
				xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock. A context switch is required. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* If there was a task waiting for data to arrive on the
						queue then unblock it now. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
							{
								/* The unblocked task has a priority higher than
								our own so yield immediately.  Yes it is ok to
								do this from within the critical section - the
								kernel takes care of that. */
								queueYIELD_IF_USING_PREEMPTION();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else if( xYieldRequired != pdFALSE )
						{
							/* This path is a special case that will only get
							executed if the task was holding multiple mutexes
							and the mutexes were given back in an order that is
							different to that in which they were taken. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately.  Yes it is ok to do
							this from within the critical section - the kernel
							takes care of that. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else if( xYieldRequired != pdFALSE )
					{
						/* This path is a special case that will only get
						executed if the task was holding multiple mutexes and
						the mutexes were given back in an order that is
						different to that in which they were taken. */
						queueYIELD_IF_USING_PREEMPTION();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was full and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();

					/* Return to the original privilege level before exiting
					the function. */
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was full and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

				/* Unlocking the queue means queue events can affect the
				event list.  It is possible that interrupts occurring now
				remove this task from the event list again - but as the
				scheduler is suspended the task will go onto the pending
				ready list instead of the actual ready list. */
				prvUnlockQueue( pxQueue );

				/* Resuming the scheduler will move tasks from the pending
				ready list into the ready list - so it is feasible that this
				task is already in a ready list before it yields - in which
				case the yield will not cause a context switch unless there
				is also a higher priority task in the pending ready list. */
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The timeout has expired. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			traceQUEUE_SEND_FAILED( pxQueue );
			return errQUEUE_FULL;
		}
	}
}
/*-----------------------------------------------------------*/
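
/* Example usage (an editorial sketch, not part of the original source;
assumes the xQueueSend() macro from queue.h, which calls xQueueGenericSend()
with queueSEND_TO_BACK):

	uint32_t ulValue = 42;

	// Block for up to ten ticks waiting for space to become available.
	if( xQueueSend( xQueue, &ulValue, ( TickType_t ) 10 ) != pdPASS )
	{
		// errQUEUE_FULL - the queue was still full when the timeout expired.
	}
*/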

#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				/* Is there room on the queue now?  To be running we must be
				the highest priority task wanting to access the queue. */
				if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
				{
					traceQUEUE_SEND( pxQueue );
					prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately. */
							portYIELD_WITHIN_API();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						taskEXIT_CRITICAL();
						return errQUEUE_FULL;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueFull( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_SEND( pxQueue );
						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					taskEXIT_CRITICAL();
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
			}
			taskEXIT_CRITICAL();
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	int8_t *pcOriginalReadPosition;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
				{
					/* Remember our read position in case we are just peeking. */
					pcOriginalReadPosition = pxQueue->u.pcReadFrom;

					prvCopyDataFromQueue( pxQueue, pvBuffer );

					if( xJustPeeking == pdFALSE )
					{
						traceQUEUE_RECEIVE( pxQueue );

						/* Data is actually being removed (not just peeked). */
						--( pxQueue->uxMessagesWaiting );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Record the information required to implement
								priority inheritance should it become necessary. */
								pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
							{
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
					}
					else
					{
						traceQUEUE_PEEK( pxQueue );

						/* The data is not being removed, so reset our read
1071                                                 pointer. */\r
1072                                                 pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
1073 \r
1074                                                 /* The data is being left in the queue, so see if there are\r
1075                                                 any other tasks waiting for the data. */\r
1076                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1077                                                 {\r
1078                                                         /* Tasks removed from the event list go straight to the\r
1079                                                         ready list here, as the scheduler is not suspended. */\r
1080                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1081                                                         {\r
1082                                                                 /* The task waiting has a higher priority than this task. */\r
1083                                                                 portYIELD_WITHIN_API();\r
1084                                                         }\r
1085                                                         else\r
1086                                                         {\r
1087                                                                 mtCOVERAGE_TEST_MARKER();\r
1088                                                         }\r
1089                                                 }\r
1090                                                 else\r
1091                                                 {\r
1092                                                         mtCOVERAGE_TEST_MARKER();\r
1093                                                 }\r
1094                                         }\r
1095 \r
1096                                         taskEXIT_CRITICAL();\r
1097                                         return pdPASS;\r
1098                                 }\r
1099                                 else\r
1100                                 {\r
1101                                         if( xTicksToWait == ( TickType_t ) 0 )\r
1102                                         {\r
1103                                                 taskEXIT_CRITICAL();\r
1104                                                 traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1105                                                 return errQUEUE_EMPTY;\r
1106                                         }\r
1107                                         else if( xEntryTimeSet == pdFALSE )\r
1108                                         {\r
1109                                                 vTaskSetTimeOutState( &xTimeOut );\r
1110                                                 xEntryTimeSet = pdTRUE;\r
1111                                         }\r
1112                                 }\r
1113                         }\r
1114                         taskEXIT_CRITICAL();\r
1115 \r
1116                         taskENTER_CRITICAL();\r
1117                         {\r
1118                                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )\r
1119                                 {\r
1120                                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1121                                         {\r
1122                                                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );\r
1123 \r
1124                                                 #if ( configUSE_MUTEXES == 1 )\r
1125                                                 {\r
1126                                                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1127                                                         {\r
1128                                                                 taskENTER_CRITICAL();\r
1129                                                                 {\r
1130                                                                         vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );\r
1131                                                                 }\r
1132                                                                 taskEXIT_CRITICAL();\r
1133                                                         }\r
1134                                                         else\r
1135                                                         {\r
1136                                                                 mtCOVERAGE_TEST_MARKER();\r
1137                                                         }\r
1138                                                 }\r
1139                                                 #endif\r
1140 \r
1141                                                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );\r
1142                                                 portYIELD_WITHIN_API();\r
1143                                         }\r
1144                                         else\r
1145                                         {\r
1146                                                 mtCOVERAGE_TEST_MARKER();\r
1147                                         }\r
1148                                 }\r
1149                                 else\r
1150                                 {\r
1151                                         taskEXIT_CRITICAL();\r
1152                                         traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1153                                         return errQUEUE_EMPTY;\r
1154                                 }\r
1155                         }\r
1156                         taskEXIT_CRITICAL();\r
1157                 }\r
1158         }\r
1159 \r
1160 \r
1161 #endif /* configUSE_ALTERNATIVE_API */\r
1162 /*-----------------------------------------------------------*/\r
1163 \r
1164 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )\r
1165 {\r
1166 BaseType_t xReturn;\r
1167 UBaseType_t uxSavedInterruptStatus;\r
1168 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1169 \r
1170         configASSERT( pxQueue );\r
1171         configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1172         configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );\r
1173 \r
1174         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1175         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1176         above the maximum system call priority are kept permanently enabled, even\r
1177         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1178         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1179         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1180         failure if a FreeRTOS API function is called from an interrupt that has been\r
1181         assigned a priority above the configured maximum system call priority.\r
1182         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1183         that have been assigned a priority at or (logically) below the maximum\r
1184         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1185         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1186         More information (albeit Cortex-M specific) is provided on the following\r
1187         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1188         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1189 \r
1190         /* Similar to xQueueGenericSend, except without blocking if there is no room\r
1191         in the queue.  Also don't directly wake a task that was blocked on a queue\r
1192         read; instead, return a flag to say whether a context switch is required or\r
1193         not (i.e. has a task with a higher priority than us been woken by this\r
1194         post). */\r
1195         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1196         {\r
1197                 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )\r
1198                 {\r
1199                         traceQUEUE_SEND_FROM_ISR( pxQueue );\r
1200 \r
1201                         /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a\r
1202                         semaphore or mutex.  That means prvCopyDataToQueue() cannot result\r
1203                         in a task disinheriting a priority and prvCopyDataToQueue() can be\r
1204                         called here even though the disinherit function does not check if\r
1205                         the scheduler is suspended before accessing the ready lists. */\r
1206                         ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
1207 \r
1208                         /* The event list is not altered if the queue is locked.  This will\r
1209                         be done when the queue is unlocked later. */\r
1210                         if( pxQueue->xTxLock == queueUNLOCKED )\r
1211                         {\r
1212                                 #if ( configUSE_QUEUE_SETS == 1 )\r
1213                                 {\r
1214                                         if( pxQueue->pxQueueSetContainer != NULL )\r
1215                                         {\r
1216                                                 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )\r
1217                                                 {\r
1218                                                         /* The queue is a member of a queue set, and posting\r
1219                                                         to the queue set caused a higher priority task to\r
1220                                                         unblock.  A context switch is required. */\r
1221                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1222                                                         {\r
1223                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1224                                                         }\r
1225                                                         else\r
1226                                                         {\r
1227                                                                 mtCOVERAGE_TEST_MARKER();\r
1228                                                         }\r
1229                                                 }\r
1230                                                 else\r
1231                                                 {\r
1232                                                         mtCOVERAGE_TEST_MARKER();\r
1233                                                 }\r
1234                                         }\r
1235                                         else\r
1236                                         {\r
1237                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1238                                                 {\r
1239                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1240                                                         {\r
1241                                                                 /* The task waiting has a higher priority so\r
1242                                                                 record that a context switch is required. */\r
1243                                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1244                                                                 {\r
1245                                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1246                                                                 }\r
1247                                                                 else\r
1248                                                                 {\r
1249                                                                         mtCOVERAGE_TEST_MARKER();\r
1250                                                                 }\r
1251                                                         }\r
1252                                                         else\r
1253                                                         {\r
1254                                                                 mtCOVERAGE_TEST_MARKER();\r
1255                                                         }\r
1256                                                 }\r
1257                                                 else\r
1258                                                 {\r
1259                                                         mtCOVERAGE_TEST_MARKER();\r
1260                                                 }\r
1261                                         }\r
1262                                 }\r
1263                                 #else /* configUSE_QUEUE_SETS */\r
1264                                 {\r
1265                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1266                                         {\r
1267                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1268                                                 {\r
1269                                                         /* The task waiting has a higher priority so record that a\r
1270                                                         context switch is required. */\r
1271                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1272                                                         {\r
1273                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1274                                                         }\r
1275                                                         else\r
1276                                                         {\r
1277                                                                 mtCOVERAGE_TEST_MARKER();\r
1278                                                         }\r
1279                                                 }\r
1280                                                 else\r
1281                                                 {\r
1282                                                         mtCOVERAGE_TEST_MARKER();\r
1283                                                 }\r
1284                                         }\r
1285                                         else\r
1286                                         {\r
1287                                                 mtCOVERAGE_TEST_MARKER();\r
1288                                         }\r
1289                                 }\r
1290                                 #endif /* configUSE_QUEUE_SETS */\r
1291                         }\r
1292                         else\r
1293                         {\r
1294                                 /* Increment the lock count so the task that unlocks the queue\r
1295                                 knows that data was posted while it was locked. */\r
1296                                 ++( pxQueue->xTxLock );\r
1297                         }\r
1298 \r
1299                         xReturn = pdPASS;\r
1300                 }\r
1301                 else\r
1302                 {\r
1303                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1304                         xReturn = errQUEUE_FULL;\r
1305                 }\r
1306         }\r
1307         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1308 \r
1309         return xReturn;\r
1310 }\r
1311 /*-----------------------------------------------------------*/\r
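/*\r
 * Illustrative sketch only (not part of the original file): the usage\r
 * pattern xQueueGenericSendFromISR() is designed for, normally reached\r
 * through the xQueueSendFromISR() macro in queue.h.  The handler name,\r
 * the xRxQueue handle and ucReadReceivedByte() are hypothetical, and\r
 * portYIELD_FROM_ISR() is port specific (some ports name it\r
 * portEND_SWITCHING_ISR()).\r
 *\r
 *      extern QueueHandle_t xRxQueue;  // created elsewhere with xQueueCreate()\r
 *\r
 *      void vExampleUART_IRQHandler( void )\r
 *      {\r
 *      BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
 *      uint8_t ucByte = ucReadReceivedByte();  // hypothetical register read\r
 *\r
 *              // An ISR cannot block, so if the queue is full the byte is\r
 *              // dropped and errQUEUE_FULL is returned.\r
 *              ( void ) xQueueSendFromISR( xRxQueue, &ucByte, &xHigherPriorityTaskWoken );\r
 *\r
 *              // Yield only if the send unblocked a higher priority task.\r
 *              portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
 *      }\r
 */\r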
1312 \r
1313 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )\r
1314 {\r
1315 BaseType_t xReturn;\r
1316 UBaseType_t uxSavedInterruptStatus;\r
1317 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1318 \r
1319         /* Similar to xQueueGenericSendFromISR() but used with semaphores where the\r
1320         item size is 0.  Don't directly wake a task that was blocked on a queue\r
1321         read; instead, return a flag to say whether a context switch is required or\r
1322         not (i.e. has a task with a higher priority than us been woken by this\r
1323         post). */\r
1324 \r
1325         configASSERT( pxQueue );\r
1326 \r
1327         /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()\r
1328         if the item size is not 0. */\r
1329         configASSERT( pxQueue->uxItemSize == 0 );\r
1330 \r
1331         /* Normally a mutex would not be given from an interrupt, especially if\r
1332         there is a mutex holder, as priority inheritance makes no sense for an\r
1333         interrupt, only for tasks. */\r
1334         configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );\r
1335 \r
1336         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1337         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1338         above the maximum system call priority are kept permanently enabled, even\r
1339         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1340         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1341         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1342         failure if a FreeRTOS API function is called from an interrupt that has been\r
1343         assigned a priority above the configured maximum system call priority.\r
1344         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1345         that have been assigned a priority at or (logically) below the maximum\r
1346         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1347         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1348         More information (albeit Cortex-M specific) is provided on the following\r
1349         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1350         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1351 \r
1352         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1353         {\r
1354                 /* When the queue is used to implement a semaphore, no data is ever\r
1355                 moved through the queue, but it is still valid to see if the queue\r
1356                 'has space'. */\r
1357                 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
1358                 {\r
1359                         traceQUEUE_SEND_FROM_ISR( pxQueue );\r
1360 \r
1361                         /* A task can only have an inherited priority if it is a mutex\r
1362                         holder - and if there is a mutex holder then the mutex cannot be\r
1363                         given from an ISR.  As this is the ISR version of the function it\r
1364                         can be assumed there is no mutex holder and no need to determine if\r
1365                         priority disinheritance is needed.  Simply increase the count of\r
1366                         messages (semaphores) available. */\r
1367                         ++( pxQueue->uxMessagesWaiting );\r
1368 \r
1369                         /* The event list is not altered if the queue is locked.  This will\r
1370                         be done when the queue is unlocked later. */\r
1371                         if( pxQueue->xTxLock == queueUNLOCKED )\r
1372                         {\r
1373                                 #if ( configUSE_QUEUE_SETS == 1 )\r
1374                                 {\r
1375                                         if( pxQueue->pxQueueSetContainer != NULL )\r
1376                                         {\r
1377                                                 if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )\r
1378                                                 {\r
1379                                                         /* The semaphore is a member of a queue set, and\r
1380                                                         posting to the queue set caused a higher priority\r
1381                                                         task to unblock.  A context switch is required. */\r
1382                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1383                                                         {\r
1384                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1385                                                         }\r
1386                                                         else\r
1387                                                         {\r
1388                                                                 mtCOVERAGE_TEST_MARKER();\r
1389                                                         }\r
1390                                                 }\r
1391                                                 else\r
1392                                                 {\r
1393                                                         mtCOVERAGE_TEST_MARKER();\r
1394                                                 }\r
1395                                         }\r
1396                                         else\r
1397                                         {\r
1398                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1399                                                 {\r
1400                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1401                                                         {\r
1402                                                                 /* The task waiting has a higher priority so\r
1403                                                                 record that a context switch is required. */\r
1404                                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1405                                                                 {\r
1406                                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1407                                                                 }\r
1408                                                                 else\r
1409                                                                 {\r
1410                                                                         mtCOVERAGE_TEST_MARKER();\r
1411                                                                 }\r
1412                                                         }\r
1413                                                         else\r
1414                                                         {\r
1415                                                                 mtCOVERAGE_TEST_MARKER();\r
1416                                                         }\r
1417                                                 }\r
1418                                                 else\r
1419                                                 {\r
1420                                                         mtCOVERAGE_TEST_MARKER();\r
1421                                                 }\r
1422                                         }\r
1423                                 }\r
1424                                 #else /* configUSE_QUEUE_SETS */\r
1425                                 {\r
1426                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1427                                         {\r
1428                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1429                                                 {\r
1430                                                         /* The task waiting has a higher priority so record that a\r
1431                                                         context switch is required. */\r
1432                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1433                                                         {\r
1434                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1435                                                         }\r
1436                                                         else\r
1437                                                         {\r
1438                                                                 mtCOVERAGE_TEST_MARKER();\r
1439                                                         }\r
1440                                                 }\r
1441                                                 else\r
1442                                                 {\r
1443                                                         mtCOVERAGE_TEST_MARKER();\r
1444                                                 }\r
1445                                         }\r
1446                                         else\r
1447                                         {\r
1448                                                 mtCOVERAGE_TEST_MARKER();\r
1449                                         }\r
1450                                 }\r
1451                                 #endif /* configUSE_QUEUE_SETS */\r
1452                         }\r
1453                         else\r
1454                         {\r
1455                                 /* Increment the lock count so the task that unlocks the queue\r
1456                                 knows that data was posted while it was locked. */\r
1457                                 ++( pxQueue->xTxLock );\r
1458                         }\r
1459 \r
1460                         xReturn = pdPASS;\r
1461                 }\r
1462                 else\r
1463                 {\r
1464                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1465                         xReturn = errQUEUE_FULL;\r
1466                 }\r
1467         }\r
1468         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1469 \r
1470         return xReturn;\r
1471 }\r
1472 /*-----------------------------------------------------------*/\r
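/*\r
 * Illustrative sketch only: the deferred interrupt pattern that\r
 * xQueueGiveFromISR() exists to serve, normally reached through the\r
 * xSemaphoreGiveFromISR() macro in semphr.h.  The handler name and the\r
 * xBinarySemaphore handle are hypothetical.\r
 *\r
 *      extern SemaphoreHandle_t xBinarySemaphore;  // from xSemaphoreCreateBinary()\r
 *\r
 *      void vExampleInterruptHandler( void )\r
 *      {\r
 *      BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
 *\r
 *              // The item size is zero, so no data is copied - only the count\r
 *              // of available 'messages' (semaphores) is incremented.\r
 *              ( void ) xSemaphoreGiveFromISR( xBinarySemaphore, &xHigherPriorityTaskWoken );\r
 *              portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
 *      }\r
 */\r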
1473 \r
1474 BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )\r
1475 {\r
1476 BaseType_t xEntryTimeSet = pdFALSE;\r
1477 TimeOut_t xTimeOut;\r
1478 int8_t *pcOriginalReadPosition;\r
1479 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1480 \r
1481         configASSERT( pxQueue );\r
1482         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1483         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )\r
1484         {\r
1485                 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );\r
1486         }\r
1487         #endif\r
1488 \r
1489         /* This function relaxes the coding standard somewhat to allow return\r
1490         statements within the function itself.  This is done in the interest\r
1491         of execution time efficiency. */\r
1492 \r
1493         for( ;; )\r
1494         {\r
1495                 taskENTER_CRITICAL();\r
1496                 {\r
1497                         /* Is there data in the queue now?  To be running the calling task\r
1498                         must be the highest priority task wanting to access the queue. */\r
1499                         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1500                         {\r
1501                                 /* Remember the read position in case the queue is only being\r
1502                                 peeked. */\r
1503                                 pcOriginalReadPosition = pxQueue->u.pcReadFrom;\r
1504 \r
1505                                 prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1506 \r
1507                                 if( xJustPeeking == pdFALSE )\r
1508                                 {\r
1509                                         traceQUEUE_RECEIVE( pxQueue );\r
1510 \r
1511                                         /* Actually removing data, not just peeking. */\r
1512                                         --( pxQueue->uxMessagesWaiting );\r
1513 \r
1514                                         #if ( configUSE_MUTEXES == 1 )\r
1515                                         {\r
1516                                                 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1517                                                 {\r
1518                                                         /* Record the information required to implement\r
1519                                                         priority inheritance should it become necessary. */\r
1520                                                         pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */\r
1521                                                 }\r
1522                                                 else\r
1523                                                 {\r
1524                                                         mtCOVERAGE_TEST_MARKER();\r
1525                                                 }\r
1526                                         }\r
1527                                         #endif /* configUSE_MUTEXES */\r
1528 \r
1529                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1530                                         {\r
1531                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )\r
1532                                                 {\r
1533                                                         queueYIELD_IF_USING_PREEMPTION();\r
1534                                                 }\r
1535                                                 else\r
1536                                                 {\r
1537                                                         mtCOVERAGE_TEST_MARKER();\r
1538                                                 }\r
1539                                         }\r
1540                                         else\r
1541                                         {\r
1542                                                 mtCOVERAGE_TEST_MARKER();\r
1543                                         }\r
1544                                 }\r
1545                                 else\r
1546                                 {\r
1547                                         traceQUEUE_PEEK( pxQueue );\r
1548 \r
1549                                         /* The data is not being removed, so reset the read\r
1550                                         pointer. */\r
1551                                         pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
1552 \r
1553                                         /* The data is being left in the queue, so see if there are\r
1554                                         any other tasks waiting for the data. */\r
1555                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1556                                         {\r
1557                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1558                                                 {\r
1559                                                         /* The task waiting has a higher priority than this task. */\r
1560                                                         queueYIELD_IF_USING_PREEMPTION();\r
1561                                                 }\r
1562                                                 else\r
1563                                                 {\r
1564                                                         mtCOVERAGE_TEST_MARKER();\r
1565                                                 }\r
1566                                         }\r
1567                                         else\r
1568                                         {\r
1569                                                 mtCOVERAGE_TEST_MARKER();\r
1570                                         }\r
1571                                 }\r
1572 \r
1573                                 taskEXIT_CRITICAL();\r
1574                                 return pdPASS;\r
1575                         }\r
1576                         else\r
1577                         {\r
1578                                 if( xTicksToWait == ( TickType_t ) 0 )\r
1579                                 {\r
1580                                         /* The queue was empty and no block time is specified (or\r
1581                                         the block time has expired) so leave now. */\r
1582                                         taskEXIT_CRITICAL();\r
1583                                         traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1584                                         return errQUEUE_EMPTY;\r
1585                                 }\r
1586                                 else if( xEntryTimeSet == pdFALSE )\r
1587                                 {\r
1588                                         /* The queue was empty and a block time was specified so\r
1589                                         configure the timeout structure. */\r
1590                                         vTaskSetTimeOutState( &xTimeOut );\r
1591                                         xEntryTimeSet = pdTRUE;\r
1592                                 }\r
1593                                 else\r
1594                                 {\r
1595                                         /* Entry time was already set. */\r
1596                                         mtCOVERAGE_TEST_MARKER();\r
1597                                 }\r
1598                         }\r
1599                 }\r
1600                 taskEXIT_CRITICAL();\r
1601 \r
1602                 /* Interrupts and other tasks can send to and receive from the queue\r
1603                 now the critical section has been exited. */\r
1604 \r
1605                 vTaskSuspendAll();\r
1606                 prvLockQueue( pxQueue );\r
1607 \r
1608                 /* Update the timeout state to see if it has expired yet. */\r
1609                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )\r
1610                 {\r
1611                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1612                         {\r
1613                                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );\r
1614 \r
1615                                 #if ( configUSE_MUTEXES == 1 )\r
1616                                 {\r
1617                                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1618                                         {\r
1619                                                 taskENTER_CRITICAL();\r
1620                                                 {\r
1621                                                         vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );\r
1622                                                 }\r
1623                                                 taskEXIT_CRITICAL();\r
1624                                         }\r
1625                                         else\r
1626                                         {\r
1627                                                 mtCOVERAGE_TEST_MARKER();\r
1628                                         }\r
1629                                 }\r
1630                                 #endif\r
1631 \r
1632                                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );\r
1633                                 prvUnlockQueue( pxQueue );\r
1634                                 if( xTaskResumeAll() == pdFALSE )\r
1635                                 {\r
1636                                         portYIELD_WITHIN_API();\r
1637                                 }\r
1638                                 else\r
1639                                 {\r
1640                                         mtCOVERAGE_TEST_MARKER();\r
1641                                 }\r
1642                         }\r
1643                         else\r
1644                         {\r
1645                                 /* Try again. */\r
1646                                 prvUnlockQueue( pxQueue );\r
1647                                 ( void ) xTaskResumeAll();\r
1648                         }\r
1649                 }\r
1650                 else\r
1651                 {\r
1652                         prvUnlockQueue( pxQueue );\r
1653                         ( void ) xTaskResumeAll();\r
1654 \r
1655                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1656                         {\r
1657                                 traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1658                                 return errQUEUE_EMPTY;\r
1659                         }\r
1660                         else\r
1661                         {\r
1662                                 mtCOVERAGE_TEST_MARKER();\r
1663                         }\r
1664                 }\r
1665         }\r
1666 }\r
1667 /*-----------------------------------------------------------*/\r
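/*\r
 * Illustrative sketch only: how the two behaviours of\r
 * xQueueGenericReceive() are normally reached from a task, via the\r
 * xQueueReceive() and xQueuePeek() macros in queue.h.  The task and the\r
 * queue handle passed in pvParameters are hypothetical.\r
 *\r
 *      void vExampleConsumerTask( void *pvParameters )\r
 *      {\r
 *      QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;\r
 *      uint32_t ulValue;\r
 *\r
 *              for( ;; )\r
 *              {\r
 *                      // Peek copies the item but leaves it on the queue\r
 *                      // (xJustPeeking is pdTRUE, no block time used here).\r
 *                      if( xQueuePeek( xQueue, &ulValue, 0 ) == pdPASS )\r
 *                      {\r
 *                              // The item is still available to other tasks.\r
 *                      }\r
 *\r
 *                      // Receive removes the item, blocking for up to 100ms\r
 *                      // if the queue is empty (xJustPeeking is pdFALSE).\r
 *                      if( xQueueReceive( xQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) == pdPASS )\r
 *                      {\r
 *                              // Process ulValue.\r
 *                      }\r
 *              }\r
 *      }\r
 */\r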
1668 \r
1669 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )\r
1670 {\r
1671 BaseType_t xReturn;\r
1672 UBaseType_t uxSavedInterruptStatus;\r
1673 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1674 \r
1675         configASSERT( pxQueue );\r
1676         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1677 \r
1678         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1679         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1680         above the maximum system call priority are kept permanently enabled, even\r
1681         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1682         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1683         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1684         failure if a FreeRTOS API function is called from an interrupt that has been\r
1685         assigned a priority above the configured maximum system call priority.\r
1686         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1687         that have been assigned a priority at or (logically) below the maximum\r
1688         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1689         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1690         More information (albeit Cortex-M specific) is provided on the following\r
1691         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1692         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1693 \r
1694         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1695         {\r
1696                 /* Cannot block in an ISR, so check there is data available. */\r
1697                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1698                 {\r
1699                         traceQUEUE_RECEIVE_FROM_ISR( pxQueue );\r
1700 \r
1701                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1702                         --( pxQueue->uxMessagesWaiting );\r
1703 \r
1704                         /* If the queue is locked the event list will not be modified.\r
1705                         Instead update the lock count so the task that unlocks the queue\r
1706                         will know that an ISR has removed data while the queue was\r
1707                         locked. */\r
1708                         if( pxQueue->xRxLock == queueUNLOCKED )\r
1709                         {\r
1710                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1711                                 {\r
1712                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1713                                         {\r
1714                                                 /* The task waiting has a higher priority than us so\r
1715                                                 force a context switch. */\r
1716                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1717                                                 {\r
1718                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1719                                                 }\r
1720                                                 else\r
1721                                                 {\r
1722                                                         mtCOVERAGE_TEST_MARKER();\r
1723                                                 }\r
1724                                         }\r
1725                                         else\r
1726                                         {\r
1727                                                 mtCOVERAGE_TEST_MARKER();\r
1728                                         }\r
1729                                 }\r
1730                                 else\r
1731                                 {\r
1732                                         mtCOVERAGE_TEST_MARKER();\r
1733                                 }\r
1734                         }\r
1735                         else\r
1736                         {\r
1737                                 /* Increment the lock count so the task that unlocks the queue\r
1738                                 knows that data was removed while it was locked. */\r
1739                                 ++( pxQueue->xRxLock );\r
1740                         }\r
1741 \r
1742                         xReturn = pdPASS;\r
1743                 }\r
1744                 else\r
1745                 {\r
1746                         xReturn = pdFAIL;\r
1747                         traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );\r
1748                 }\r
1749         }\r
1750         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1751 \r
1752         return xReturn;\r
1753 }\r
1754 /*-----------------------------------------------------------*/\r
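/*\r
 * Illustrative sketch only: draining a queue from within an interrupt.\r
 * xQueueReceiveFromISR() cannot block, so its pdPASS/pdFAIL return value\r
 * is used as the loop condition.  The handler name, the xTxQueue handle\r
 * and vWriteByteToTxRegister() are hypothetical.\r
 *\r
 *      extern QueueHandle_t xTxQueue;\r
 *\r
 *      void vExampleTxReadyHandler( void )\r
 *      {\r
 *      BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
 *      uint8_t ucByte;\r
 *\r
 *              // pdPASS means a byte was available and has been removed.\r
 *              while( xQueueReceiveFromISR( xTxQueue, &ucByte, &xHigherPriorityTaskWoken ) == pdPASS )\r
 *              {\r
 *                      vWriteByteToTxRegister( ucByte );  // hypothetical\r
 *              }\r
 *\r
 *              portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
 *      }\r
 */\r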
1755 \r
1756 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )\r
1757 {\r
1758 BaseType_t xReturn;\r
1759 UBaseType_t uxSavedInterruptStatus;\r
1760 int8_t *pcOriginalReadPosition;\r
1761 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1762 \r
1763         configASSERT( pxQueue );\r
1764         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1765         configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */\r
1766 \r
1767         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1768         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1769         above the maximum system call priority are kept permanently enabled, even\r
1770         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1771         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1772         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1773         failure if a FreeRTOS API function is called from an interrupt that has been\r
1774         assigned a priority above the configured maximum system call priority.\r
1775         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1776         that have been assigned a priority at or (logically) below the maximum\r
1777         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1778         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1779         More information (albeit Cortex-M specific) is provided on the following\r
1780         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1781         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1782 \r
1783         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1784         {\r
1785                 /* Cannot block in an ISR, so check there is data available. */\r
1786                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1787                 {\r
1788                         traceQUEUE_PEEK_FROM_ISR( pxQueue );\r
1789 \r
1790                         /* Remember the read position so it can be reset as nothing is\r
1791                         actually being removed from the queue. */\r
1792                         pcOriginalReadPosition = pxQueue->u.pcReadFrom;\r
1793                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1794                         pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
1795 \r
1796                         xReturn = pdPASS;\r
1797                 }\r
1798                 else\r
1799                 {\r
1800                         xReturn = pdFAIL;\r
1801                         traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );\r
1802                 }\r
1803         }\r
1804         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1805 \r
1806         return xReturn;\r
1807 }\r
1808 /*-----------------------------------------------------------*/\r
1809 \r
1810 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )\r
1811 {\r
1812 UBaseType_t uxReturn;\r
1813 \r
1814         configASSERT( xQueue );\r
1815 \r
1816         taskENTER_CRITICAL();\r
1817         {\r
1818                 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1819         }\r
1820         taskEXIT_CRITICAL();\r
1821 \r
1822         return uxReturn;\r
1823 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1824 /*-----------------------------------------------------------*/\r
1825 \r
1826 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )\r
1827 {\r
1828 UBaseType_t uxReturn;\r
1829 Queue_t *pxQueue;\r
1830 \r
1831         pxQueue = ( Queue_t * ) xQueue;\r
1832         configASSERT( pxQueue );\r
1833 \r
1834         taskENTER_CRITICAL();\r
1835         {\r
1836                 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;\r
1837         }\r
1838         taskEXIT_CRITICAL();\r
1839 \r
1840         return uxReturn;\r
1841 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1842 /*-----------------------------------------------------------*/\r
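/*\r
 * Illustrative sketch only: the value returned by uxQueueSpacesAvailable()\r
 * (and by uxQueueMessagesWaiting()) is a snapshot that can be stale by the\r
 * time it is acted on, so the result of the subsequent operation is still\r
 * checked.  xQueue and ulValue are hypothetical.\r
 *\r
 *      if( uxQueueSpacesAvailable( xQueue ) > ( UBaseType_t ) 0 )\r
 *      {\r
 *              // Space existed at the instant of the call, but another task\r
 *              // or an ISR may have filled it since.\r
 *              if( xQueueSend( xQueue, &ulValue, 0 ) != pdPASS )\r
 *              {\r
 *                      // The queue became full in the meantime.\r
 *              }\r
 *      }\r
 */\r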
1843 \r
1844 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )\r
1845 {\r
1846 UBaseType_t uxReturn;\r
1847 \r
1848         configASSERT( xQueue );\r
1849 \r
1850         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1851 \r
1852         return uxReturn;\r
1853 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1854 /*-----------------------------------------------------------*/\r
1855 \r
1856 void vQueueDelete( QueueHandle_t xQueue )\r
1857 {\r
1858 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1859 \r
1860         configASSERT( pxQueue );\r
1861 \r
1862         traceQUEUE_DELETE( pxQueue );\r
1863         #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
1864         {\r
1865                 vQueueUnregisterQueue( pxQueue );\r
1866         }\r
1867         #endif\r
1868 \r
1869         #if( configSUPPORT_STATIC_ALLOCATION == 0 )\r
1870         {\r
1871                 /* The queue and the queue storage area will have been dynamically\r
1872                 allocated in one go. */\r
1873                 vPortFree( pxQueue );\r
1874         }\r
1875         #else\r
1876         {\r
1877                 if( ( pxQueue->ucStaticAllocationFlags & queueSTATICALLY_ALLOCATED_STORAGE ) == 0 )\r
1878                 {\r
1879                         /* The queue storage area was dynamically allocated, so must be\r
1880                         freed. */\r
1881                         vPortFree( pxQueue->pcHead );\r
1882                 }\r
1883                 else\r
1884                 {\r
1885                         mtCOVERAGE_TEST_MARKER();\r
1886                 }\r
1887 \r
1888                 if( ( pxQueue->ucStaticAllocationFlags & queueSTATICALLY_ALLOCATED_QUEUE_STRUCT ) == 0 )\r
1889                 {\r
1890                         /* The queue structure was dynamically allocated, so must be\r
1891                         freed. */\r
1892                         vPortFree( pxQueue );\r
1893                 }\r
1894                 else\r
1895                 {\r
1896                         mtCOVERAGE_TEST_MARKER();\r
1897                 }\r
1898         }\r
1899         #endif\r
1900 }\r
1901 /*-----------------------------------------------------------*/\r
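/*\r
 * Illustrative sketch only: the usual dynamic create/use/delete lifecycle\r
 * that ends in vQueueDelete().  Deleting a queue does not unblock tasks\r
 * that are still waiting on it, so the application must ensure the queue\r
 * is no longer in use first.  The length and item size are hypothetical.\r
 *\r
 *      QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );\r
 *\r
 *      if( xQueue != NULL )\r
 *      {\r
 *              // ... use the queue ...\r
 *\r
 *              vQueueDelete( xQueue );\r
 *              xQueue = NULL;  // the handle dangles after deletion\r
 *      }\r
 */\r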
1902 \r
1903 #if ( configUSE_TRACE_FACILITY == 1 )\r
1904 \r
1905         UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )\r
1906         {\r
1907                 return ( ( Queue_t * ) xQueue )->uxQueueNumber;\r
1908         }\r
1909 \r
1910 #endif /* configUSE_TRACE_FACILITY */\r
1911 /*-----------------------------------------------------------*/\r
1912 \r
1913 #if ( configUSE_TRACE_FACILITY == 1 )\r
1914 \r
1915         void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )\r
1916         {\r
1917                 ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;\r
1918         }\r
1919 \r
1920 #endif /* configUSE_TRACE_FACILITY */\r
1921 /*-----------------------------------------------------------*/\r
1922 \r
1923 #if ( configUSE_TRACE_FACILITY == 1 )\r
1924 \r
1925         uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )\r
1926         {\r
1927                 return ( ( Queue_t * ) xQueue )->ucQueueType;\r
1928         }\r
1929 \r
1930 #endif /* configUSE_TRACE_FACILITY */\r
1931 /*-----------------------------------------------------------*/\r
1932 \r
1933 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )\r
1934 {\r
1935 BaseType_t xReturn = pdFALSE;\r
1936 \r
1937         if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )\r
1938         {\r
1939                 #if ( configUSE_MUTEXES == 1 )\r
1940                 {\r
1941                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1942                         {\r
1943                                 /* The mutex is no longer being held. */\r
1944                                 xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );\r
1945                                 pxQueue->pxMutexHolder = NULL;\r
1946                         }\r
1947                         else\r
1948                         {\r
1949                                 mtCOVERAGE_TEST_MARKER();\r
1950                         }\r
1951                 }\r
1952                 #endif /* configUSE_MUTEXES */\r
1953         }\r
1954         else if( xPosition == queueSEND_TO_BACK )\r
1955         {\r
1956                 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */\r
1957                 pxQueue->pcWriteTo += pxQueue->uxItemSize;\r
1958                 if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */\r
1959                 {\r
1960                         pxQueue->pcWriteTo = pxQueue->pcHead;\r
1961                 }\r
1962                 else\r
1963                 {\r
1964                         mtCOVERAGE_TEST_MARKER();\r
1965                 }\r
1966         }\r
1967         else\r
1968         {\r
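                /* Sending to the front: prvCopyDataFromQueue() pre-increments
                u.pcReadFrom before reading, so writing the new item at the
                current u.pcReadFrom and then stepping the pointer back one
                item (wrapping if necessary) places the item at the head of
                the queue. */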
1969                 ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
1970                 pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;\r
1971                 if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */\r
1972                 {\r
1973                         pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );\r
1974                 }\r
1975                 else\r
1976                 {\r
1977                         mtCOVERAGE_TEST_MARKER();\r
1978                 }\r
1979 \r
1980                 if( xPosition == queueOVERWRITE )\r
1981                 {\r
1982                         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1983                         {\r
1984                                 /* An item is not being added but overwritten, so subtract\r
1985                                 one from the recorded number of items in the queue; the count\r
1986                                 is incremented again below, so the recorded total remains\r
1987                                 correct. */\r
1988                                 --( pxQueue->uxMessagesWaiting );\r
1989                         }\r
1990                         else\r
1991                         {\r
1992                                 mtCOVERAGE_TEST_MARKER();\r
1993                         }\r
1994                 }\r
1995                 else\r
1996                 {\r
1997                         mtCOVERAGE_TEST_MARKER();\r
1998                 }\r
1999         }\r
2000 \r
2001         ++( pxQueue->uxMessagesWaiting );\r
2002 \r
2003         return xReturn;\r
2004 }\r
2005 /*-----------------------------------------------------------*/\r
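/* Sketch showing how the three xPosition values handled by
prvCopyDataToQueue() are reached from the public API.  Illustrative only -
vSendExamples, xQ and ulValue are hypothetical, and the block is guarded out
of the build. */
#if 0
static void vSendExamples( QueueHandle_t xQ )
{
uint32_t ulValue = 10UL;

        /* queueSEND_TO_BACK - the item is copied to pcWriteTo. */
        ( void ) xQueueSendToBack( xQ, &ulValue, ( TickType_t ) 0 );

        /* queueSEND_TO_FRONT - the item is copied to u.pcReadFrom. */
        ( void ) xQueueSendToFront( xQ, &ulValue, ( TickType_t ) 0 );

        /* queueOVERWRITE - intended for queues of length one; the existing
        item, if any, is replaced rather than a new one being added. */
        ( void ) xQueueOverwrite( xQ, &ulValue );
}
#endif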
2006 \r
2007 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )\r
2008 {\r
2009         if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )\r
2010         {\r
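                /* u.pcReadFrom points at the item last read, so advance it
                past that item (wrapping back to the start of the storage
                area if necessary) before performing the copy. */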
2011                 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
2012                 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */\r
2013                 {\r
2014                         pxQueue->u.pcReadFrom = pxQueue->pcHead;\r
2015                 }\r
2016                 else\r
2017                 {\r
2018                         mtCOVERAGE_TEST_MARKER();\r
2019                 }\r
2020                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */\r
2021         }\r
2022 }\r
2023 /*-----------------------------------------------------------*/\r
2024 \r
2025 static void prvUnlockQueue( Queue_t * const pxQueue )\r
2026 {\r
2027         /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */\r
2028 \r
2029         /* The lock counts contain the number of extra data items placed on\r
2030         or removed from the queue while the queue was locked.  While a queue\r
2031         is locked, items can be added or removed, but the event lists cannot\r
2032         be updated. */\r
2033         taskENTER_CRITICAL();\r
2034         {\r
2035                 /* See if data was added to the queue while it was locked. */\r
2036                 while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )\r
2037                 {\r
2038                         /* Data was posted while the queue was locked.  Are any tasks\r
2039                         blocked waiting for data to become available? */\r
2040                         #if ( configUSE_QUEUE_SETS == 1 )\r
2041                         {\r
2042                                 if( pxQueue->pxQueueSetContainer != NULL )\r
2043                                 {\r
2044                                         if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )\r
2045                                         {\r
2046                                                 /* The queue is a member of a queue set, and posting to\r
2047                                                 the queue set caused a higher priority task to unblock.\r
2048                                                 A context switch is required. */\r
2049                                                 vTaskMissedYield();\r
2050                                         }\r
2051                                         else\r
2052                                         {\r
2053                                                 mtCOVERAGE_TEST_MARKER();\r
2054                                         }\r
2055                                 }\r
2056                                 else\r
2057                                 {\r
2058                                         /* Tasks that are removed from the event list will get added to\r
2059                                         the pending ready list as the scheduler is still suspended. */\r
2060                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2061                                         {\r
2062                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2063                                                 {\r
2064                                                         /* The task waiting has a higher priority so record that a\r
2065                                                         context switch is required. */\r
2066                                                         vTaskMissedYield();\r
2067                                                 }\r
2068                                                 else\r
2069                                                 {\r
2070                                                         mtCOVERAGE_TEST_MARKER();\r
2071                                                 }\r
2072                                         }\r
2073                                         else\r
2074                                         {\r
2075                                                 break;\r
2076                                         }\r
2077                                 }\r
2078                         }\r
2079                         #else /* configUSE_QUEUE_SETS */\r
2080                         {\r
2081                                 /* Tasks that are removed from the event list will get added to\r
2082                                 the pending ready list as the scheduler is still suspended. */\r
2083                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2084                                 {\r
2085                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2086                                         {\r
2087                                                 /* The task waiting has a higher priority so record that\r
2088                                                 a context switch is required. */\r
2089                                                 vTaskMissedYield();\r
2090                                         }\r
2091                                         else\r
2092                                         {\r
2093                                                 mtCOVERAGE_TEST_MARKER();\r
2094                                         }\r
2095                                 }\r
2096                                 else\r
2097                                 {\r
2098                                         break;\r
2099                                 }\r
2100                         }\r
2101                         #endif /* configUSE_QUEUE_SETS */\r
2102 \r
2103                         --( pxQueue->xTxLock );\r
2104                 }\r
2105 \r
2106                 pxQueue->xTxLock = queueUNLOCKED;\r
2107         }\r
2108         taskEXIT_CRITICAL();\r
2109 \r
2110         /* Do the same for the Rx lock. */\r
2111         taskENTER_CRITICAL();\r
2112         {\r
2113                 while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )\r
2114                 {\r
2115                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
2116                         {\r
2117                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
2118                                 {\r
2119                                         vTaskMissedYield();\r
2120                                 }\r
2121                                 else\r
2122                                 {\r
2123                                         mtCOVERAGE_TEST_MARKER();\r
2124                                 }\r
2125 \r
2126                                 --( pxQueue->xRxLock );\r
2127                         }\r
2128                         else\r
2129                         {\r
2130                                 break;\r
2131                         }\r
2132                 }\r
2133 \r
2134                 pxQueue->xRxLock = queueUNLOCKED;\r
2135         }\r
2136         taskEXIT_CRITICAL();\r
2137 }\r
2138 /*-----------------------------------------------------------*/\r
2139 \r
2140 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )\r
2141 {\r
2142 BaseType_t xReturn;\r
2143 \r
2144         taskENTER_CRITICAL();\r
2145         {\r
2146                 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
2147                 {\r
2148                         xReturn = pdTRUE;\r
2149                 }\r
2150                 else\r
2151                 {\r
2152                         xReturn = pdFALSE;\r
2153                 }\r
2154         }\r
2155         taskEXIT_CRITICAL();\r
2156 \r
2157         return xReturn;\r
2158 }\r
2159 /*-----------------------------------------------------------*/\r
2160 \r
2161 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )\r
2162 {\r
2163 BaseType_t xReturn;\r
2164 \r
2165         configASSERT( xQueue );\r
2166         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
2167         {\r
2168                 xReturn = pdTRUE;\r
2169         }\r
2170         else\r
2171         {\r
2172                 xReturn = pdFALSE;\r
2173         }\r
2174 \r
2175         return xReturn;\r
2176 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2177 /*-----------------------------------------------------------*/\r
2178 \r
2179 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )\r
2180 {\r
2181 BaseType_t xReturn;\r
2182 \r
2183         taskENTER_CRITICAL();\r
2184         {\r
2185                 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )\r
2186                 {\r
2187                         xReturn = pdTRUE;\r
2188                 }\r
2189                 else\r
2190                 {\r
2191                         xReturn = pdFALSE;\r
2192                 }\r
2193         }\r
2194         taskEXIT_CRITICAL();\r
2195 \r
2196         return xReturn;\r
2197 }\r
2198 /*-----------------------------------------------------------*/\r
2199 \r
2200 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )\r
2201 {\r
2202 BaseType_t xReturn;\r
2203 \r
2204         configASSERT( xQueue );\r
2205         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )\r
2206         {\r
2207                 xReturn = pdTRUE;\r
2208         }\r
2209         else\r
2210         {\r
2211                 xReturn = pdFALSE;\r
2212         }\r
2213 \r
2214         return xReturn;\r
2215 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2216 /*-----------------------------------------------------------*/\r
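/* Polling sketch for the two FromISR queries above, as might appear in a
hypothetical UART interrupt handler.  xTxQueue is an application queue
created elsewhere; the block is guarded out of the build. */
#if 0
extern QueueHandle_t xTxQueue;

void vExampleUartTxISR( void )
{
        if( xQueueIsQueueEmptyFromISR( xTxQueue ) == pdFALSE )
        {
                /* At least one item is queued and could be fetched with
                xQueueReceiveFromISR(). */
        }

        if( xQueueIsQueueFullFromISR( xTxQueue ) == pdTRUE )
        {
                /* No space - an xQueueSendFromISR() call made now would
                fail. */
        }
}
#endif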
2217 \r
2218 #if ( configUSE_CO_ROUTINES == 1 )\r
2219 \r
2220         BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )\r
2221         {\r
2222         BaseType_t xReturn;\r
2223         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2224 \r
2225                 /* If the queue is already full we may have to block.  A critical section\r
2226                 is required to prevent an interrupt removing something from the queue\r
2227                 between the check to see if the queue is full and blocking on the queue. */\r
2228                 portDISABLE_INTERRUPTS();\r
2229                 {\r
2230                         if( prvIsQueueFull( pxQueue ) != pdFALSE )\r
2231                         {\r
2232                                 /* The queue is full - do we want to block or just leave without\r
2233                                 posting? */\r
2234                                 if( xTicksToWait > ( TickType_t ) 0 )\r
2235                                 {\r
2236                                         /* As this is called from a co-routine we cannot block directly, so\r
2237                                         instead return a value indicating that blocking is required. */\r
2238                                         vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );\r
2239                                         portENABLE_INTERRUPTS();\r
2240                                         return errQUEUE_BLOCKED;\r
2241                                 }\r
2242                                 else\r
2243                                 {\r
2244                                         portENABLE_INTERRUPTS();\r
2245                                         return errQUEUE_FULL;\r
2246                                 }\r
2247                         }\r
2248                 }\r
2249                 portENABLE_INTERRUPTS();\r
2250 \r
2251                 portDISABLE_INTERRUPTS();\r
2252                 {\r
2253                         if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
2254                         {\r
2255                                 /* There is room in the queue, copy the data into the queue. */\r
2256                                 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );\r
2257                                 xReturn = pdPASS;\r
2258 \r
2259                                 /* Were any co-routines waiting for data to become available? */\r
2260                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2261                                 {\r
2262                                         /* In this instance the co-routine could be placed directly\r
2263                                         into the ready list as we are within a critical section.\r
2264                                         Instead the same pending ready list mechanism is used as if\r
2265                                         the event were caused from within an interrupt. */\r
2266                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2267                                         {\r
2268                                                 /* The co-routine waiting has a higher priority so record\r
2269                                                 that a yield might be appropriate. */\r
2270                                                 xReturn = errQUEUE_YIELD;\r
2271                                         }\r
2272                                         else\r
2273                                         {\r
2274                                                 mtCOVERAGE_TEST_MARKER();\r
2275                                         }\r
2276                                 }\r
2277                                 else\r
2278                                 {\r
2279                                         mtCOVERAGE_TEST_MARKER();\r
2280                                 }\r
2281                         }\r
2282                         else\r
2283                         {\r
2284                                 xReturn = errQUEUE_FULL;\r
2285                         }\r
2286                 }\r
2287                 portENABLE_INTERRUPTS();\r
2288 \r
2289                 return xReturn;\r
2290         }\r
2291 \r
2292 #endif /* configUSE_CO_ROUTINES */\r
2293 /*-----------------------------------------------------------*/\r
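/* Usage sketch: application code does not normally call xQueueCRSend()
directly - the crQUEUE_SEND() macro in croutine.h wraps it and handles the
errQUEUE_BLOCKED return as part of the co-routine's implicit state machine.
vProducerCoRoutine and xCoRoutineQueue are hypothetical, and the block is
guarded out of the build.  Note that variables that must hold their value
across a potentially blocking macro inside a co-routine must be static. */
#if 0
extern QueueHandle_t xCoRoutineQueue;

void vProducerCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
static BaseType_t xResult;
static uint32_t ulValueToPost = 0;

        crSTART( xHandle );

        for( ;; )
        {
                /* Wait up to 10 ticks for space to become available. */
                crQUEUE_SEND( xHandle, xCoRoutineQueue, &ulValueToPost, ( TickType_t ) 10, &xResult );

                if( xResult == pdPASS )
                {
                        ulValueToPost++;
                }
        }

        crEND();
}
#endif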
2294 \r
2295 #if ( configUSE_CO_ROUTINES == 1 )\r
2296 \r
2297         BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )\r
2298         {\r
2299         BaseType_t xReturn;\r
2300         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2301 \r
2302                 /* If the queue is already empty we may have to block.  A critical section\r
2303                 is required to prevent an interrupt adding something to the queue\r
2304                 between the check to see if the queue is empty and blocking on the queue. */\r
2305                 portDISABLE_INTERRUPTS();\r
2306                 {\r
2307                         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
2308                         {\r
2309                         /* There are no messages in the queue - do we want to block or just\r
2310                         leave with nothing? */\r
2311                                 if( xTicksToWait > ( TickType_t ) 0 )\r
2312                                 {\r
2313                                         /* As this is a co-routine we cannot block directly, so instead\r
2314                                         return a value indicating that blocking is required. */\r
2315                                         vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );\r
2316                                         portENABLE_INTERRUPTS();\r
2317                                         return errQUEUE_BLOCKED;\r
2318                                 }\r
2319                                 else\r
2320                                 {\r
2321                                         portENABLE_INTERRUPTS();\r
2322                                         return errQUEUE_FULL;\r
2323                                 }\r
2324                         }\r
2325                         else\r
2326                         {\r
2327                                 mtCOVERAGE_TEST_MARKER();\r
2328                         }\r
2329                 }\r
2330                 portENABLE_INTERRUPTS();\r
2331 \r
2332                 portDISABLE_INTERRUPTS();\r
2333                 {\r
2334                         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
2335                         {\r
2336                                 /* Data is available from the queue. */\r
2337                                 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
2338                                 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )\r
2339                                 {\r
2340                                         pxQueue->u.pcReadFrom = pxQueue->pcHead;\r
2341                                 }\r
2342                                 else\r
2343                                 {\r
2344                                         mtCOVERAGE_TEST_MARKER();\r
2345                                 }\r
2346                                 --( pxQueue->uxMessagesWaiting );\r
2347                                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );\r
2348 \r
2349                                 xReturn = pdPASS;\r
2350 \r
2351                                 /* Were any co-routines waiting for space to become available? */\r
2352                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
2353                                 {\r
2354                                         /* In this instance the co-routine could be placed directly\r
2355                                         into the ready list as we are within a critical section.\r
2356                                         Instead the same pending ready list mechanism is used as if\r
2357                                         the event were caused from within an interrupt. */\r
2358                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
2359                                         {\r
2360                                                 xReturn = errQUEUE_YIELD;\r
2361                                         }\r
2362                                         else\r
2363                                         {\r
2364                                                 mtCOVERAGE_TEST_MARKER();\r
2365                                         }\r
2366                                 }\r
2367                                 else\r
2368                                 {\r
2369                                         mtCOVERAGE_TEST_MARKER();\r
2370                                 }\r
2371                         }\r
2372                         else\r
2373                         {\r
2374                                 xReturn = pdFAIL;\r
2375                         }\r
2376                 }\r
2377                 portENABLE_INTERRUPTS();\r
2378 \r
2379                 return xReturn;\r
2380         }\r
2381 \r
2382 #endif /* configUSE_CO_ROUTINES */\r
2383 /*-----------------------------------------------------------*/\r
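/* The receiving side of the same pattern: crQUEUE_RECEIVE() wraps
xQueueCRReceive() in the same way.  Hypothetical names again, and the block
is guarded out of the build. */
#if 0
extern QueueHandle_t xCoRoutineQueue;

void vConsumerCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
static BaseType_t xResult;
static uint32_t ulReceivedValue;

        crSTART( xHandle );

        for( ;; )
        {
                /* Wait up to 10 ticks for data to arrive. */
                crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &ulReceivedValue, ( TickType_t ) 10, &xResult );

                if( xResult == pdPASS )
                {
                        /* ulReceivedValue now holds the posted value. */
                }
        }

        crEND();
}
#endif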
2384 \r
2385 #if ( configUSE_CO_ROUTINES == 1 )\r
2386 \r
2387         BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )\r
2388         {\r
2389         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2390 \r
2391                 /* Cannot block within an ISR so if there is no space on the queue then\r
2392                 exit without doing anything. */\r
2393                 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
2394                 {\r
2395                         prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );\r
2396 \r
2397                         /* We only want to wake one co-routine per ISR, so check that a\r
2398                         co-routine has not already been woken. */\r
2399                         if( xCoRoutinePreviouslyWoken == pdFALSE )\r
2400                         {\r
2401                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2402                                 {\r
2403                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2404                                         {\r
2405                                                 return pdTRUE;\r
2406                                         }\r
2407                                         else\r
2408                                         {\r
2409                                                 mtCOVERAGE_TEST_MARKER();\r
2410                                         }\r
2411                                 }\r
2412                                 else\r
2413                                 {\r
2414                                         mtCOVERAGE_TEST_MARKER();\r
2415                                 }\r
2416                         }\r
2417                         else\r
2418                         {\r
2419                                 mtCOVERAGE_TEST_MARKER();\r
2420                         }\r
2421                 }\r
2422                 else\r
2423                 {\r
2424                         mtCOVERAGE_TEST_MARKER();\r
2425                 }\r
2426 \r
2427                 return xCoRoutinePreviouslyWoken;\r
2428         }\r
2429 \r
2430 #endif /* configUSE_CO_ROUTINES */\r
2431 /*-----------------------------------------------------------*/\r
2432 \r
2433 #if ( configUSE_CO_ROUTINES == 1 )\r
2434 \r
2435         BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )\r
2436         {\r
2437         BaseType_t xReturn;\r
2438         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2439 \r
2440                 /* We cannot block from an ISR, so check there is data available. If\r
2441                 not then just leave without doing anything. */\r
2442                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
2443                 {\r
2444                         /* Copy the data from the queue. */\r
2445                         pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
2446                         if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )\r
2447                         {\r
2448                                 pxQueue->u.pcReadFrom = pxQueue->pcHead;\r
2449                         }\r
2450                         else\r
2451                         {\r
2452                                 mtCOVERAGE_TEST_MARKER();\r
2453                         }\r
2454                         --( pxQueue->uxMessagesWaiting );\r
2455                         ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );\r
2456 \r
2457                         if( ( *pxCoRoutineWoken ) == pdFALSE )\r
2458                         {\r
2459                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
2460                                 {\r
2461                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
2462                                         {\r
2463                                                 *pxCoRoutineWoken = pdTRUE;\r
2464                                         }\r
2465                                         else\r
2466                                         {\r
2467                                                 mtCOVERAGE_TEST_MARKER();\r
2468                                         }\r
2469                                 }\r
2470                                 else\r
2471                                 {\r
2472                                         mtCOVERAGE_TEST_MARKER();\r
2473                                 }\r
2474                         }\r
2475                         else\r
2476                         {\r
2477                                 mtCOVERAGE_TEST_MARKER();\r
2478                         }\r
2479 \r
2480                         xReturn = pdPASS;\r
2481                 }\r
2482                 else\r
2483                 {\r
2484                         xReturn = pdFAIL;\r
2485                 }\r
2486 \r
2487                 return xReturn;\r
2488         }\r
2489 \r
2490 #endif /* configUSE_CO_ROUTINES */\r
2491 /*-----------------------------------------------------------*/\r
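/* ISR-side sketch: the two FromISR functions above are normally reached
through the crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() macros.
A hypothetical receive interrupt posting one character per invocation is
shown; prvReadRxRegister() is a made-up helper, and the block is guarded
out of the build. */
#if 0
extern QueueHandle_t xCommsQueue;

void vExampleRxISR( void )
{
BaseType_t xCoRoutineWoken = pdFALSE;
char cRxedChar = prvReadRxRegister();

        /* Post the character, waking at most one co-routine per
        interrupt. */
        xCoRoutineWoken = crQUEUE_SEND_FROM_ISR( xCommsQueue, &cRxedChar, xCoRoutineWoken );
        ( void ) xCoRoutineWoken;
}
#endif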
2492 \r
2493 #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
2494 \r
2495         void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
2496         {\r
2497         UBaseType_t ux;\r
2498 \r
2499                 /* See if there is an empty space in the registry.  A NULL name denotes\r
2500                 a free slot. */\r
2501                 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )\r
2502                 {\r
2503                         if( xQueueRegistry[ ux ].pcQueueName == NULL )\r
2504                         {\r
2505                                 /* Store the information on this queue. */\r
2506                                 xQueueRegistry[ ux ].pcQueueName = pcQueueName;\r
2507                                 xQueueRegistry[ ux ].xHandle = xQueue;\r
2508 \r
2509                                 traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );\r
2510                                 break;\r
2511                         }\r
2512                         else\r
2513                         {\r
2514                                 mtCOVERAGE_TEST_MARKER();\r
2515                         }\r
2516                 }\r
2517         }\r
2518 \r
2519 #endif /* configQUEUE_REGISTRY_SIZE */\r
2520 /*-----------------------------------------------------------*/\r
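/* Registry usage sketch: adding a queue under a human-readable name so that
kernel-aware debuggers can display it.  Only the name pointer is stored, so
the string must remain valid - a string literal is typical.  The function
and queue names are hypothetical; the block is guarded out of the build. */
#if 0
static void vRegistryExample( void )
{
QueueHandle_t xAppQueue = xQueueCreate( 10, sizeof( uint32_t ) );

        vQueueAddToRegistry( xAppQueue, "AppQueue" );

        /* ...the name can later be looked up, or the entry removed. */
        configASSERT( pcQueueGetQueueName( xAppQueue ) != NULL );
        vQueueUnregisterQueue( xAppQueue );
}
#endif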
2521 \r
2522 #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
2523 \r
2524         const char *pcQueueGetQueueName( QueueHandle_t xQueue )\r
2525         {\r
2526         UBaseType_t ux;\r
2527         const char *pcReturn = NULL;\r
2528 \r
2529                 /* Note there is nothing here to protect against another task adding or\r
2530                 removing entries from the registry while it is being searched. */\r
2531                 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )\r
2532                 {\r
2533                         if( xQueueRegistry[ ux ].xHandle == xQueue )\r
2534                         {\r
2535                                 pcReturn = xQueueRegistry[ ux ].pcQueueName;\r
2536                                 break;\r
2537                         }\r
2538                         else\r
2539                         {\r
2540                                 mtCOVERAGE_TEST_MARKER();\r
2541                         }\r
2542                 }\r
2543 \r
2544                 return pcReturn;\r
2545         }\r
2546 \r
2547 #endif /* configQUEUE_REGISTRY_SIZE */\r
2548 /*-----------------------------------------------------------*/\r
2549 \r
2550 #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
2551 \r
2552         void vQueueUnregisterQueue( QueueHandle_t xQueue )\r
2553         {\r
2554         UBaseType_t ux;\r
2555 \r
2556                 /* See if the handle of the queue being unregistered is actually in the\r
2557                 registry. */\r
2558                 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )\r
2559                 {\r
2560                         if( xQueueRegistry[ ux ].xHandle == xQueue )\r
2561                         {\r
2562                                 /* Set the name to NULL to show that this slot is free again. */\r
2563                                 xQueueRegistry[ ux ].pcQueueName = NULL;\r
2564 \r
2565                                 /* Set the handle to NULL to ensure the same queue handle cannot\r
2566                                 appear in the registry twice if it is added, removed, then\r
2567                                 added again. */\r
2568                                 xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;\r
2569                                 break;\r
2570                         }\r
2571                         else\r
2572                         {\r
2573                                 mtCOVERAGE_TEST_MARKER();\r
2574                         }\r
2575                 }\r
2576 \r
2577         } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2578 \r
2579 #endif /* configQUEUE_REGISTRY_SIZE */\r
2580 /*-----------------------------------------------------------*/\r
2581 \r
2582 #if ( configUSE_TIMERS == 1 )\r
2583 \r
2584         void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )\r
2585         {\r
2586         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2587 \r
2588                 /* This function should not be called by application code - hence the\r
2589                 'Restricted' in its name.  It is not part of the public API.  It is\r
2590                 designed for use by kernel code, and has special calling requirements:\r
2591                 it can result in vListInsert() being called on a list that can only\r
2592                 possibly ever have one item in it, so the insertion will be fast, but\r
2593                 even so it should be called with the scheduler locked and not from a\r
2594                 critical section. */\r
2595 \r
2596                 /* Only do anything if there are no messages in the queue.  This function\r
2597                 will not actually cause the task to block, just place it on a blocked\r
2598                 list.  It will not block until the scheduler is unlocked - at which\r
2599                 time a yield will be performed.  If an item is added to the queue while\r
2600                 the queue is locked, and the calling task blocks on the queue, then the\r
2601                 calling task will be immediately unblocked when the queue is unlocked. */\r
2602                 prvLockQueue( pxQueue );\r
2603                 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )\r
2604                 {\r
2605                         /* There is nothing in the queue, block for the specified period. */\r
2606                         vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );\r
2607                 }\r
2608                 else\r
2609                 {\r
2610                         mtCOVERAGE_TEST_MARKER();\r
2611                 }\r
2612                 prvUnlockQueue( pxQueue );\r
2613         }\r
2614 \r
2615 #endif /* configUSE_TIMERS */\r
2616 /*-----------------------------------------------------------*/\r
2617 \r
2618 #if ( configUSE_QUEUE_SETS == 1 )\r
2619 \r
2620         QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )\r
2621         {\r
2622         QueueSetHandle_t pxQueue;\r
2623 \r
2624                 pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), NULL, NULL, queueQUEUE_TYPE_SET );\r
2625 \r
2626                 return pxQueue;\r
2627         }\r
2628 \r
2629 #endif /* configUSE_QUEUE_SETS */\r
2630 /*-----------------------------------------------------------*/\r
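/* Sizing sketch: the set is itself a queue of handles, so uxEventQueueLength
must cover every event that could be pending at once - typically the sum of
the lengths of the queues and the maximum counts of the semaphores that will
be added to the set.  Hypothetical sizes; guarded out of the build. */
#if 0
#define exQUEUE_LENGTH          10
#define exBINARY_SEMAPHORES     3

static QueueSetHandle_t prvCreateExampleSet( void )
{
        /* One slot per event that can be pending across all members. */
        return xQueueCreateSet( exQUEUE_LENGTH + exBINARY_SEMAPHORES );
}
#endif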
2631 \r
2632 #if ( configUSE_QUEUE_SETS == 1 )\r
2633 \r
2634         BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )\r
2635         {\r
2636         BaseType_t xReturn;\r
2637 \r
2638                 taskENTER_CRITICAL();\r
2639                 {\r
2640                         if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )\r
2641                         {\r
2642                                 /* Cannot add a queue/semaphore to more than one queue set. */\r
2643                                 xReturn = pdFAIL;\r
2644                         }\r
2645                         else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )\r
2646                         {\r
2647                                 /* Cannot add a queue/semaphore to a queue set if there are already\r
2648                                 items in the queue/semaphore. */\r
2649                                 xReturn = pdFAIL;\r
2650                         }\r
2651                         else\r
2652                         {\r
2653                                 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;\r
2654                                 xReturn = pdPASS;\r
2655                         }\r
2656                 }\r
2657                 taskEXIT_CRITICAL();\r
2658 \r
2659                 return xReturn;\r
2660         }\r
2661 \r
2662 #endif /* configUSE_QUEUE_SETS */\r
2663 /*-----------------------------------------------------------*/\r
2664 \r
2665 #if ( configUSE_QUEUE_SETS == 1 )\r
2666 \r
2667         BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )\r
2668         {\r
2669         BaseType_t xReturn;\r
2670         Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;\r
2671 \r
2672                 if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )\r
2673                 {\r
2674                         /* The queue was not a member of the set. */\r
2675                         xReturn = pdFAIL;\r
2676                 }\r
2677                 else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )\r
2678                 {\r
2679                         /* It is dangerous to remove a queue from a set when the queue is\r
2680                         not empty because the queue set will still hold pending events for\r
2681                         the queue. */\r
2682                         xReturn = pdFAIL;\r
2683                 }\r
2684                 else\r
2685                 {\r
2686                         taskENTER_CRITICAL();\r
2687                         {\r
2688                                 /* The queue is no longer contained in the set. */\r
2689                                 pxQueueOrSemaphore->pxQueueSetContainer = NULL;\r
2690                         }\r
2691                         taskEXIT_CRITICAL();\r
2692                         xReturn = pdPASS;\r
2693                 }\r
2694 \r
2695                 return xReturn;\r
2696         } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */\r
2697 \r
2698 #endif /* configUSE_QUEUE_SETS */\r
2699 /*-----------------------------------------------------------*/\r
2700 \r
2701 #if ( configUSE_QUEUE_SETS == 1 )\r
2702 \r
2703         QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )\r
2704         {\r
2705         QueueSetMemberHandle_t xReturn = NULL;\r
2706 \r
2707                 ( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */\r
2708                 return xReturn;\r
2709         }\r
2710 \r
2711 #endif /* configUSE_QUEUE_SETS */\r
2712 /*-----------------------------------------------------------*/\r
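/* Event-loop sketch combining xQueueAddToSet() and xQueueSelectFromSet():
block on the set, then read from whichever member the returned handle
identifies.  The member is read with a zero block time because the event has
already been consumed from the set.  All names are hypothetical, and the
block is guarded out of the build (semphr.h supplies xSemaphoreTake()). */
#if 0
static void vEventLoop( QueueHandle_t xQueue, SemaphoreHandle_t xSemaphore, QueueSetHandle_t xSet )
{
QueueSetMemberHandle_t xActivated;
uint32_t ulReceived;

        ( void ) xQueueAddToSet( xQueue, xSet );
        ( void ) xQueueAddToSet( xSemaphore, xSet );

        for( ;; )
        {
                xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );

                if( xActivated == ( QueueSetMemberHandle_t ) xQueue )
                {
                        ( void ) xQueueReceive( xQueue, &ulReceived, ( TickType_t ) 0 );
                }
                else if( xActivated == ( QueueSetMemberHandle_t ) xSemaphore )
                {
                        ( void ) xSemaphoreTake( xSemaphore, ( TickType_t ) 0 );
                }
        }
}
#endif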
2713 \r
2714 #if ( configUSE_QUEUE_SETS == 1 )\r
2715 \r
2716         QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )\r
2717         {\r
2718         QueueSetMemberHandle_t xReturn = NULL;\r
2719 \r
2720                 ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */\r
2721                 return xReturn;\r
2722         }\r
2723 \r
2724 #endif /* configUSE_QUEUE_SETS */\r
2725 /*-----------------------------------------------------------*/\r
2726 \r
2727 #if ( configUSE_QUEUE_SETS == 1 )\r
2728 \r
2729         static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )\r
2730         {\r
2731         Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;\r
2732         BaseType_t xReturn = pdFALSE;\r
2733 \r
2734                 /* This function must be called from within a critical section. */\r
2735 \r
2736                 configASSERT( pxQueueSetContainer );\r
2737                 configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );\r
2738 \r
2739                 if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )\r
2740                 {\r
2741                         traceQUEUE_SEND( pxQueueSetContainer );\r
2742 \r
2743                         /* The data copied is the handle of the queue that contains data. */\r
2744                         xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );\r
2745 \r
2746                         if( pxQueueSetContainer->xTxLock == queueUNLOCKED )\r
2747                         {\r
2748                                 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )\r
2749                                 {\r
2750                                         if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )\r
2751                                         {\r
2752                                                 /* The task waiting has a higher priority. */\r
2753                                                 xReturn = pdTRUE;\r
2754                                         }\r
2755                                         else\r
2756                                         {\r
2757                                                 mtCOVERAGE_TEST_MARKER();\r
2758                                         }\r
2759                                 }\r
2760                                 else\r
2761                                 {\r
2762                                         mtCOVERAGE_TEST_MARKER();\r
2763                                 }\r
2764                         }\r
2765                         else\r
2766                         {\r
2767                                 ( pxQueueSetContainer->xTxLock )++;\r
2768                         }\r
2769                 }\r
2770                 else\r
2771                 {\r
2772                         mtCOVERAGE_TEST_MARKER();\r
2773                 }\r
2774 \r
2775                 return xReturn;\r
2776         }\r
2777 \r
2778 #endif /* configUSE_QUEUE_SETS */\r
2779 \r
2780 \r