/*
	FreeRTOS V8.2.0rc1 - Copyright (C) 2014 Real Time Engineers Ltd.
	All rights reserved

	VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

	This file is part of the FreeRTOS distribution.

	FreeRTOS is free software; you can redistribute it and/or modify it under
	the terms of the GNU General Public License (version 2) as published by the
	Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.

	>>!   NOTE: The modification to the GPL is included to allow you to     !<<
	>>!   distribute a combined work that includes FreeRTOS without being   !<<
	>>!   obliged to provide the source code for proprietary components     !<<
	>>!   outside of the FreeRTOS kernel.                                   !<<

	FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
	WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
	FOR A PARTICULAR PURPOSE.  Full license text is available on the following
	link: http://www.freertos.org/a00114.html

	1 tab == 4 spaces!

	***************************************************************************
	 *                                                                       *
	 *    Having a problem?  Start by reading the FAQ "My application does   *
	 *    not run, what could be wrong?".  Have you defined configASSERT()?  *
	 *                                                                       *
	 *    http://www.FreeRTOS.org/FAQHelp.html                               *
	 *                                                                       *
	***************************************************************************

	***************************************************************************
	 *                                                                       *
	 *    FreeRTOS provides completely free yet professionally developed,    *
	 *    robust, strictly quality controlled, supported, and cross          *
	 *    platform software that is more than just the market leader, it     *
	 *    is the industry's de facto standard.                               *
	 *                                                                       *
	 *    Help yourself get started quickly while simultaneously helping     *
	 *    to support the FreeRTOS project by purchasing a FreeRTOS           *
	 *    tutorial book, reference manual, or both:                          *
	 *    http://www.FreeRTOS.org/Documentation                              *
	 *                                                                       *
	***************************************************************************

	***************************************************************************
	 *                                                                       *
	 *   Investing in training allows your team to be as productive as       *
	 *   possible as early as possible, lowering your overall development    *
	 *   cost, and enabling you to bring a more robust product to market     *
	 *   earlier than would otherwise be possible.  Richard Barry is both    *
	 *   the architect and key author of FreeRTOS, and so also the world's   *
	 *   leading authority on what is the world's most popular real time     *
	 *   kernel for deeply embedded MCU designs.  Obtaining your training    *
	 *   from Richard ensures your team will gain directly from his in-depth *
	 *   product knowledge and years of usage experience.  Contact Real Time *
	 *   Engineers Ltd to enquire about the FreeRTOS Masterclass, presented  *
	 *   by Richard Barry:  http://www.FreeRTOS.org/contact
	 *                                                                       *
	***************************************************************************

	***************************************************************************
	 *                                                                       *
	 *    You are receiving this top quality software for free.  Please play *
	 *    fair and reciprocate by reporting any suspected issues and         *
	 *    participating in the community forum:                              *
	 *    http://www.FreeRTOS.org/support                                    *
	 *                                                                       *
	 *    Thank you!                                                         *
	 *                                                                       *
	***************************************************************************

	http://www.FreeRTOS.org - Documentation, books, training, latest versions,
	license and Real Time Engineers Ltd. contact details.

	http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
	including FreeRTOS+Trace - an indispensable productivity tool, a DOS
	compatible FAT file system, and our tiny thread aware UDP/IP stack.

	http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
	Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.

	http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High
	Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
	licenses offer ticketed support, indemnification and commercial middleware.

	http://www.SafeRTOS.com - High Integrity Systems also provide a safety
	engineered and independently SIL3 certified version for use in safety and
	mission critical applications that require provable dependability.

	1 tab == 4 spaces!
*/

#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
	#include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */


/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED					( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH	( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME			( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
	int8_t *pcHead;					/*< Points to the beginning of the queue storage area. */
	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items, this is used as a marker. */
	int8_t *pcWriteTo;				/*< Points to the next free place in the storage area. */

	union							/* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
	{
		int8_t *pcReadFrom;			/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
		UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
	} u;

	List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
	List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

	volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
	UBaseType_t uxItemSize;			/*< The size of each item that the queue will hold. */

	volatile BaseType_t xRxLock;	/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
	volatile BaseType_t xTxLock;	/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

	#if ( configUSE_TRACE_FACILITY == 1 )
		UBaseType_t uxQueueNumber;
		uint8_t ucQueueType;
	#endif

	#if ( configUSE_QUEUE_SETS == 1 )
		struct QueueDefinition *pxQueueSetContainer;
	#endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/
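
/*
 * A minimal usage sketch of the by-copy behaviour described above.  It assumes
 * the xQueueCreate() and xQueueSend() macros declared in queue.h; the
 * Message_t type and variable names are illustrative only:
 *
 *	typedef struct { uint32_t ulValue; } Message_t;
 *	QueueHandle_t xQueue = xQueueCreate( 8, sizeof( Message_t ) );
 *	Message_t xMessage = { 123UL };
 *
 *	if( xQueue != NULL )
 *	{
 *		// The contents of xMessage are copied into the queue storage area,
 *		// so changing xMessage afterwards does not alter the queued copy.
 *		( void ) xQueueSend( xQueue, &xMessage, ( TickType_t ) 0 );
 *		xMessage.ulValue = 456UL;
 *	}
 */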

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} xQueueRegistryItem;

	/* The old xQueueRegistryItem name is maintained above then typedefed to
	the new QueueRegistryItem_t name below to enable the use of older kernel
	aware debuggers. */
	typedef xQueueRegistryItem QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
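
/*
 * A minimal usage sketch, assuming configQUEUE_REGISTRY_SIZE > 0 and the
 * vQueueAddToRegistry() API declared in queue.h (handle name illustrative):
 *
 *	QueueHandle_t xRxQueue = xQueueCreate( 4, sizeof( uint32_t ) );
 *
 *	if( xRxQueue != NULL )
 *	{
 *		// The name is only used by kernel aware debuggers.
 *		vQueueAddToRegistry( xRxQueue, "RxQueue" );
 *	}
 */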

/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
	/*
	 * Checks to see if a queue is a member of a queue set, and if so, notifies
	 * the queue set that the queue contains data.
	 */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL();									\
	{														\
		if( ( pxQueue )->xRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->xTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
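
/*
 * Sketch of the sequence the blocking paths in this file wrap around the
 * lock/unlock pair (see xQueueGenericSend() below for the real usage):
 *
 *	vTaskSuspendAll();
 *	prvLockQueue( pxQueue );
 *
 *	// Timeout checks and event list manipulation happen here.  An ISR can
 *	// still add or remove items, but cannot touch the event lists, so it
 *	// increments xRxLock/xTxLock instead and prvUnlockQueue() acts on the
 *	// counts afterwards.
 *
 *	prvUnlockQueue( pxQueue );
 *	( void ) xTaskResumeAll();
 */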

BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
		pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
		pxQueue->pcWriteTo = pxQueue->pcHead;
		pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
		pxQueue->xRxLock = queueUNLOCKED;
		pxQueue->xTxLock = queueUNLOCKED;

		if( xNewQueue == pdFALSE )
		{
			/* If there are tasks blocked waiting to read from the queue, then
			the tasks will remain blocked as after this function exits the queue
			will still be empty.  If there are tasks blocked waiting to write to
			the queue, then one should be unblocked as after this function exits
			it will be possible to write to it. */
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
				{
					queueYIELD_IF_USING_PREEMPTION();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			/* Ensure the event queues start in the correct state. */
			vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
		}
	}
	taskEXIT_CRITICAL();

	/* A value is returned for calling semantic consistency with previous
	versions. */
	return pdPASS;
}
/*-----------------------------------------------------------*/
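
/*
 * A minimal usage sketch, assuming the xQueueReset() macro that queue.h maps
 * onto this function with xNewQueue set to pdFALSE:
 *
 *	// Discard everything currently held in the queue.  A task blocked waiting
 *	// to send is unblocked (there is space again); tasks blocked waiting to
 *	// receive stay blocked because the queue is now empty.
 *	( void ) xQueueReset( xQueue );
 */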

QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
QueueHandle_t xReturn = NULL;
int8_t *pcAllocatedBuffer;

	/* Remove compiler warnings about unused parameters should
	configUSE_TRACE_FACILITY not be set to 1. */
	( void ) ucQueueType;

	configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

	if( uxItemSize == ( UBaseType_t ) 0 )
	{
		/* There is not going to be a queue storage area. */
		xQueueSizeInBytes = ( size_t ) 0;
	}
	else
	{
		/* The queue is one byte longer than asked for to make wrap checking
		easier/faster. */
		xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
	}

	/* Allocate the new queue structure and storage area. */
	pcAllocatedBuffer = ( int8_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

	if( pcAllocatedBuffer != NULL )
	{
		pxNewQueue = ( Queue_t * ) pcAllocatedBuffer; /*lint !e826 MISRA The buffer cannot be too small because it was dimensioned by sizeof( Queue_t ) + xQueueSizeInBytes. */

		if( uxItemSize == ( UBaseType_t ) 0 )
		{
			/* No RAM was allocated for the queue storage area, but pcHead
			cannot be set to NULL because NULL is used as a key to say the queue
			is used as a mutex.  Therefore just set pcHead to point to the queue
			as a benign value that is known to be within the memory map. */
			pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
		}
		else
		{
			/* Jump past the queue structure to find the location of the queue
			storage area - adding the padding bytes to get a better alignment. */
			pxNewQueue->pcHead = pcAllocatedBuffer + sizeof( Queue_t );
		}

		/* Initialise the queue members as described above where the queue type
		is defined. */
		pxNewQueue->uxLength = uxQueueLength;
		pxNewQueue->uxItemSize = uxItemSize;
		( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

		#if ( configUSE_TRACE_FACILITY == 1 )
		{
			pxNewQueue->ucQueueType = ucQueueType;
		}
		#endif /* configUSE_TRACE_FACILITY */

		#if( configUSE_QUEUE_SETS == 1 )
		{
			pxNewQueue->pxQueueSetContainer = NULL;
		}
		#endif /* configUSE_QUEUE_SETS */

		traceQUEUE_CREATE( pxNewQueue );
		xReturn = pxNewQueue;
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}

	configASSERT( xReturn );

	return xReturn;
}
/*-----------------------------------------------------------*/
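
/*
 * A minimal usage sketch, assuming the xQueueCreate() macro that queue.h maps
 * onto this function with ucQueueType set to queueQUEUE_TYPE_BASE:
 *
 *	// One pvPortMalloc() call provides the Queue_t structure plus storage
 *	// for ten uint32_t items (plus the one spare byte noted above).
 *	QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );
 *
 *	configASSERT( xQueue ); // NULL means the heap allocation failed.
 */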

#if ( configUSE_MUTEXES == 1 )

	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		/* Allocate the new queue structure. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
		if( pxNewQueue != NULL )
		{
			/* Information required for priority inheritance. */
			pxNewQueue->pxMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* When a queue is used as a mutex no data is actually copied into
			or out of the queue. */
			pxNewQueue->pcWriteTo = NULL;
			pxNewQueue->u.pcReadFrom = NULL;

			/* Each mutex has a length of 1 (like a binary semaphore) and
			an item size of 0 as nothing is actually copied into or out
			of the mutex. */
			pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
			pxNewQueue->uxLength = ( UBaseType_t ) 1U;
			pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
			pxNewQueue->xRxLock = queueUNLOCKED;
			pxNewQueue->xTxLock = queueUNLOCKED;

			#if ( configUSE_TRACE_FACILITY == 1 )
			{
				pxNewQueue->ucQueueType = ucQueueType;
			}
			#endif

			#if ( configUSE_QUEUE_SETS == 1 )
			{
				pxNewQueue->pxQueueSetContainer = NULL;
			}
			#endif

			/* Ensure the event queues start with the correct state. */
			vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state. */
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			traceCREATE_MUTEX_FAILED();
		}

		configASSERT( pxNewQueue );
		return pxNewQueue;
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
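
/*
 * A minimal usage sketch, assuming the xSemaphoreCreateMutex() macro that
 * semphr.h maps onto this function, plus xSemaphoreTake()/xSemaphoreGive():
 *
 *	SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();
 *
 *	if( xMutex != NULL )
 *	{
 *		if( xSemaphoreTake( xMutex, portMAX_DELAY ) == pdPASS )
 *		{
 *			// Access the resource guarded by the mutex here.
 *			( void ) xSemaphoreGive( xMutex );
 *		}
 *	}
 */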

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
	{
	void *pxReturn;

		/* This function is called by xSemaphoreGetMutexHolder(), and should not
		be called directly.  Note:  This is a good way of determining if the
		calling task is the mutex holder, but not a good way of determining the
		identity of the mutex holder, as the holder may change between the
		following critical section exiting and the function returning. */
		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
			}
			else
			{
				pxReturn = NULL;
			}
		}
		taskEXIT_CRITICAL();

		return pxReturn;
	} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
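
/*
 * A minimal usage sketch, assuming the xSemaphoreGetMutexHolder() macro in
 * semphr.h and INCLUDE_xTaskGetCurrentTaskHandle set to 1 (per the note
 * above, only the "is the caller the holder?" test is reliable):
 *
 *	if( xSemaphoreGetMutexHolder( xMutex ) == xTaskGetCurrentTaskHandle() )
 *	{
 *		// The calling task holds the mutex.
 *	}
 */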

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then pxMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		pxMutexHolder can never coincidentally equal the task's handle, and as
		this is the only condition we are interested in it does not matter if
		pxMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the pxMutexHolder variable. */
		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.uxRecursiveCallCount )--;

			/* Have we unwound the call count? */
			if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			/* The mutex cannot be given because the calling task is not the
			holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* Comments regarding mutual exclusion as per those within
		xQueueGiveMutexRecursive(). */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
		{
			( pxMutex->u.uxRecursiveCallCount )++;
			xReturn = pdPASS;
		}
		else
		{
			xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

			/* pdPASS will only be returned if the mutex was successfully
			obtained.  The calling task may have entered the Blocked state
			before reaching here. */
			if( xReturn == pdPASS )
			{
				( pxMutex->u.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
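
/*
 * A minimal usage sketch, assuming the recursive mutex macros from semphr.h
 * (xSemaphoreCreateRecursiveMutex(), xSemaphoreTakeRecursive() and
 * xSemaphoreGiveRecursive()):
 *
 *	SemaphoreHandle_t xRecMutex = xSemaphoreCreateRecursiveMutex();
 *
 *	// The holder can take the mutex repeatedly; uxRecursiveCallCount tracks
 *	// the nesting and the mutex is only returned by the final give.
 *	( void ) xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY );
 *	( void ) xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY );
 *	( void ) xSemaphoreGiveRecursive( xRecMutex ); // still held
 *	( void ) xSemaphoreGiveRecursive( xRecMutex ); // now available
 */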

#if ( configUSE_COUNTING_SEMAPHORES == 1 )

	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		configASSERT( xHandle );
		return xHandle;
	}

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/
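
/*
 * A minimal usage sketch, assuming the xSemaphoreCreateCounting() macro that
 * semphr.h maps onto this function:
 *
 *	// Counts up to 5, starts at 0.  As the item size is zero, each give/take
 *	// only adjusts uxMessagesWaiting - no data is copied.
 *	SemaphoreHandle_t xCounting = xSemaphoreCreateCounting( 5, 0 );
 *
 *	if( xCounting != NULL )
 *	{
 *		( void ) xSemaphoreGive( xCounting );    // count becomes 1
 *		( void ) xSemaphoreTake( xCounting, 0 ); // count back to 0
 *	}
 */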

BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there room on the queue now?  The running task must be
			the highest priority task wanting to access the queue.  If
			the head item in the queue is to be overwritten then it does
			not matter if the queue is full. */
			if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
			{
				traceQUEUE_SEND( pxQueue );
				xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock. A context switch is required. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* If there was a task waiting for data to arrive on the
						queue then unblock it now. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
							{
								/* The unblocked task has a priority higher than
								our own so yield immediately.  Yes it is ok to
								do this from within the critical section - the
								kernel takes care of that. */
								queueYIELD_IF_USING_PREEMPTION();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else if( xYieldRequired != pdFALSE )
						{
							/* This path is a special case that will only get
							executed if the task was holding multiple mutexes
							and the mutexes were given back in an order that is
							different to that in which they were taken. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately.  Yes it is ok to do
							this from within the critical section - the kernel
							takes care of that. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else if( xYieldRequired != pdFALSE )
					{
						/* This path is a special case that will only get
						executed if the task was holding multiple mutexes and
						the mutexes were given back in an order that is
						different to that in which they were taken. */
						queueYIELD_IF_USING_PREEMPTION();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was full and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();

					/* Return to the original privilege level before exiting
					the function. */
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was full and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

				/* Unlocking the queue means queue events can affect the
				event list.  It is possible that interrupts occurring now
				remove this task from the event list again - but as the
				scheduler is suspended the task will go onto the pending
				ready list instead of the actual ready list. */
				prvUnlockQueue( pxQueue );

				/* Resuming the scheduler will move tasks from the pending
				ready list into the ready list - so it is feasible that this
				task is already in a ready list before it yields - in which
				case the yield will not cause a context switch unless there
				is also a higher priority task in the pending ready list. */
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The timeout has expired. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			/* Return to the original privilege level before exiting the
			function. */
			traceQUEUE_SEND_FAILED( pxQueue );
			return errQUEUE_FULL;
		}
	}
}
/*-----------------------------------------------------------*/
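
/*
 * A minimal usage sketch, assuming the queue.h macros that wrap this function
 * (xQueueSend()/xQueueSendToBack(), xQueueSendToFront() and xQueueOverwrite()):
 *
 *	uint32_t ulValue = 10UL;
 *
 *	// Copy ulValue to the back of the queue, blocking for up to 100 ticks
 *	// if the queue is full.
 *	if( xQueueSendToBack( xQueue, &ulValue, ( TickType_t ) 100 ) != pdPASS )
 *	{
 *		// errQUEUE_FULL - the item could not be posted within the timeout.
 *	}
 *
 *	// xQueueOverwrite() passes queueOVERWRITE, so it is only valid on a
 *	// queue of length 1 (see the configASSERT() above) and never blocks.
 *	( void ) xQueueOverwrite( xMailbox, &ulValue );
 */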

#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				/* Is there room on the queue now?  To be running we must be
				the highest priority task wanting to access the queue. */
				if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
				{
					traceQUEUE_SEND( pxQueue );
					prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately. */
							portYIELD_WITHIN_API();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						taskEXIT_CRITICAL();
						return errQUEUE_FULL;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueFull( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_SEND( pxQueue );
						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					taskEXIT_CRITICAL();
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
			}
			taskEXIT_CRITICAL();
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/
928 \r
929 #if ( configUSE_ALTERNATIVE_API == 1 )\r
930 \r
931         BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )\r
932         {\r
933         BaseType_t xEntryTimeSet = pdFALSE;\r
934         TimeOut_t xTimeOut;\r
935         int8_t *pcOriginalReadPosition;\r
936         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
937 \r
938                 configASSERT( pxQueue );\r
939                 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
940 \r
941                 for( ;; )\r
942                 {\r
943                         taskENTER_CRITICAL();\r
944                         {\r
945                                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
946                                 {\r
947                                         /* Remember our read position in case we are just peeking. */\r
948                                         pcOriginalReadPosition = pxQueue->u.pcReadFrom;\r
949 \r
950                                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
951 \r
952                                         if( xJustPeeking == pdFALSE )\r
953                                         {\r
954                                                 traceQUEUE_RECEIVE( pxQueue );\r
955 \r
956                                                 /* Data is actually being removed (not just peeked). */\r
957                                                 --( pxQueue->uxMessagesWaiting );\r
958 \r
959                                                 #if ( configUSE_MUTEXES == 1 )\r
960                                                 {\r
961                                                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
962                                                         {\r
963                                                                 /* Record the information required to implement\r
964                                                                 priority inheritance should it become necessary. */\r
965                                                                 pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();\r
966                                                         }\r
967                                                         else\r
968                                                         {\r
969                                                                 mtCOVERAGE_TEST_MARKER();\r
970                                                         }\r
971                                                 }\r
972                                                 #endif\r
973 \r
974                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
975                                                 {\r
976                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )\r
977                                                         {\r
978                                                                 portYIELD_WITHIN_API();\r
979                                                         }\r
980                                                         else\r
981                                                         {\r
982                                                                 mtCOVERAGE_TEST_MARKER();\r
983                                                         }\r
984                                                 }\r
985                                         }\r
986                                         else\r
987                                         {\r
988                                                 traceQUEUE_PEEK( pxQueue );\r
989 \r
990                                                 /* The data is not being removed, so reset our read\r
991                                                 pointer. */\r
992                                                 pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
993 \r
994                                                 /* The data is being left in the queue, so see if there are\r
995                                                 any other tasks waiting for the data. */\r
996                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
997                                                 {\r
998                                                         /* Tasks that are removed from the event list will get added to\r
999                                                         the ready list, or to the pending ready list if the scheduler is suspended. */\r
1000                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1001                                                         {\r
1002                                                                 /* The task waiting has a higher priority than this task. */\r
1003                                                                 portYIELD_WITHIN_API();\r
1004                                                         }\r
1005                                                         else\r
1006                                                         {\r
1007                                                                 mtCOVERAGE_TEST_MARKER();\r
1008                                                         }\r
1009                                                 }\r
1010                                                 else\r
1011                                                 {\r
1012                                                         mtCOVERAGE_TEST_MARKER();\r
1013                                                 }\r
1014                                         }\r
1015 \r
1016                                         taskEXIT_CRITICAL();\r
1017                                         return pdPASS;\r
1018                                 }\r
1019                                 else\r
1020                                 {\r
1021                                         if( xTicksToWait == ( TickType_t ) 0 )\r
1022                                         {\r
1023                                                 taskEXIT_CRITICAL();\r
1024                                                 traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1025                                                 return errQUEUE_EMPTY;\r
1026                                         }\r
1027                                         else if( xEntryTimeSet == pdFALSE )\r
1028                                         {\r
1029                                                 vTaskSetTimeOutState( &xTimeOut );\r
1030                                                 xEntryTimeSet = pdTRUE;\r
1031                                         }\r
1032                                 }\r
1033                         }\r
1034                         taskEXIT_CRITICAL();\r
1035 \r
1036                         taskENTER_CRITICAL();\r
1037                         {\r
1038                                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )\r
1039                                 {\r
1040                                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1041                                         {\r
1042                                                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );\r
1043 \r
1044                                                 #if ( configUSE_MUTEXES == 1 )\r
1045                                                 {\r
1046                                                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1047                                                         {\r
1048                                                                 taskENTER_CRITICAL();\r
1049                                                                 {\r
1050                                                                         vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );\r
1051                                                                 }\r
1052                                                                 taskEXIT_CRITICAL();\r
1053                                                         }\r
1054                                                         else\r
1055                                                         {\r
1056                                                                 mtCOVERAGE_TEST_MARKER();\r
1057                                                         }\r
1058                                                 }\r
1059                                                 #endif\r
1060 \r
1061                                                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );\r
1062                                                 portYIELD_WITHIN_API();\r
1063                                         }\r
1064                                         else\r
1065                                         {\r
1066                                                 mtCOVERAGE_TEST_MARKER();\r
1067                                         }\r
1068                                 }\r
1069                                 else\r
1070                                 {\r
1071                                         taskEXIT_CRITICAL();\r
1072                                         traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1073                                         return errQUEUE_EMPTY;\r
1074                                 }\r
1075                         }\r
1076                         taskEXIT_CRITICAL();\r
1077                 }\r
1078         }\r
1079 \r
1080 \r
1081 #endif /* configUSE_ALTERNATIVE_API */\r
1082 /*-----------------------------------------------------------*/\r
1083 \r
1084 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )\r
1085 {\r
1086 BaseType_t xReturn;\r
1087 UBaseType_t uxSavedInterruptStatus;\r
1088 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1089 \r
1090         configASSERT( pxQueue );\r
1091         configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1092         configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );\r
1093 \r
1094         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1095         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1096         above the maximum system call priority are kept permanently enabled, even\r
1097         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1098         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1099         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1100         failure if a FreeRTOS API function is called from an interrupt that has been\r
1101         assigned a priority above the configured maximum system call priority.\r
1102         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1103         that have been assigned a priority at or (logically) below the maximum\r
1104         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1105         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1106         More information (albeit Cortex-M specific) is provided on the following\r
1107         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1108         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1109 \r
1110         /* Similar to xQueueGenericSend, except without blocking if there is no room\r
1111         in the queue.  Also don't directly wake a task that was blocked on a queue\r
1112         read; instead, return a flag to say whether a context switch is required or\r
1113         not (i.e. has a task with a higher priority than us been woken by this\r
1114         post). */\r
1115         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1116         {\r
1117                 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )\r
1118                 {\r
1119                         traceQUEUE_SEND_FROM_ISR( pxQueue );\r
1120 \r
1121                         /* A task can only have an inherited priority if it is a mutex\r
1122                         holder - and if there is a mutex holder then the mutex cannot be\r
1123                         given from an ISR.  Therefore, unlike the xQueueGenericGive()\r
1124                         function, there is no need to determine the need for priority\r
1125                         disinheritance here or to clear the mutex holder TCB member. */\r
1126                         ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
1127 \r
1128                         /* The event list is not altered if the queue is locked.  This will\r
1129                         be done when the queue is unlocked later. */\r
1130                         if( pxQueue->xTxLock == queueUNLOCKED )\r
1131                         {\r
1132                                 #if ( configUSE_QUEUE_SETS == 1 )\r
1133                                 {\r
1134                                         if( pxQueue->pxQueueSetContainer != NULL )\r
1135                                         {\r
1136                                                 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )\r
1137                                                 {\r
1138                                                         /* The queue is a member of a queue set, and posting\r
1139                                                         to the queue set caused a higher priority task to\r
1140                                                         unblock.  A context switch is required. */\r
1141                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1142                                                         {\r
1143                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1144                                                         }\r
1145                                                         else\r
1146                                                         {\r
1147                                                                 mtCOVERAGE_TEST_MARKER();\r
1148                                                         }\r
1149                                                 }\r
1150                                                 else\r
1151                                                 {\r
1152                                                         mtCOVERAGE_TEST_MARKER();\r
1153                                                 }\r
1154                                         }\r
1155                                         else\r
1156                                         {\r
1157                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1158                                                 {\r
1159                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1160                                                         {\r
1161                                                                 /* The task waiting has a higher priority so\r
1162                                                                 record that a context switch is required. */\r
1163                                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1164                                                                 {\r
1165                                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1166                                                                 }\r
1167                                                                 else\r
1168                                                                 {\r
1169                                                                         mtCOVERAGE_TEST_MARKER();\r
1170                                                                 }\r
1171                                                         }\r
1172                                                         else\r
1173                                                         {\r
1174                                                                 mtCOVERAGE_TEST_MARKER();\r
1175                                                         }\r
1176                                                 }\r
1177                                                 else\r
1178                                                 {\r
1179                                                         mtCOVERAGE_TEST_MARKER();\r
1180                                                 }\r
1181                                         }\r
1182                                 }\r
1183                                 #else /* configUSE_QUEUE_SETS */\r
1184                                 {\r
1185                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1186                                         {\r
1187                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1188                                                 {\r
1189                                                         /* The task waiting has a higher priority so record that a\r
1190                                                         context switch is required. */\r
1191                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1192                                                         {\r
1193                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1194                                                         }\r
1195                                                         else\r
1196                                                         {\r
1197                                                                 mtCOVERAGE_TEST_MARKER();\r
1198                                                         }\r
1199                                                 }\r
1200                                                 else\r
1201                                                 {\r
1202                                                         mtCOVERAGE_TEST_MARKER();\r
1203                                                 }\r
1204                                         }\r
1205                                         else\r
1206                                         {\r
1207                                                 mtCOVERAGE_TEST_MARKER();\r
1208                                         }\r
1209                                 }\r
1210                                 #endif /* configUSE_QUEUE_SETS */\r
1211                         }\r
1212                         else\r
1213                         {\r
1214                                 /* Increment the lock count so the task that unlocks the queue\r
1215                                 knows that data was posted while it was locked. */\r
1216                                 ++( pxQueue->xTxLock );\r
1217                         }\r
1218 \r
1219                         xReturn = pdPASS;\r
1220                 }\r
1221                 else\r
1222                 {\r
1223                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1224                         xReturn = errQUEUE_FULL;\r
1225                 }\r
1226         }\r
1227         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1228 \r
1229         return xReturn;\r
1230 }\r
1231 /*-----------------------------------------------------------*/\r
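/*\r
 * Example usage (an illustrative sketch, not part of the kernel source):\r
 * posting to a queue from an interrupt handler.  The names vRxISR,\r
 * xRxQueue and ulReadRxData() are assumptions made for the example only;\r
 * applications normally reach this function through the\r
 * xQueueSendFromISR() / xQueueSendToBackFromISR() macros rather than\r
 * calling it directly.\r
 *\r
 * void vRxISR( void )\r
 * {\r
 * uint32_t ulReceived;\r
 * BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
 *\r
 *     ulReceived = ulReadRxData();\r
 *     xQueueSendFromISR( xRxQueue, &ulReceived, &xHigherPriorityTaskWoken );\r
 *\r
 *     // Request a context switch before exiting the ISR if the post\r
 *     // unblocked a task of higher priority than the one interrupted.\r
 *     portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
 * }\r
 */\r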
1232 \r
1233 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )\r
1234 {\r
1235 BaseType_t xReturn;\r
1236 UBaseType_t uxSavedInterruptStatus;\r
1237 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1238 \r
1239         configASSERT( pxQueue );\r
1240 \r
1241         /* xQueueGenericSendFromISR() should be used if the item size is not 0. */\r
1242         configASSERT( pxQueue->uxItemSize == 0 );\r
1243 \r
1244         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1245         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1246         above the maximum system call priority are kept permanently enabled, even\r
1247         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1248         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1249         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1250         failure if a FreeRTOS API function is called from an interrupt that has been\r
1251         assigned a priority above the configured maximum system call priority.\r
1252         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1253         that have been assigned a priority at or (logically) below the maximum\r
1254         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1255         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1256         More information (albeit Cortex-M specific) is provided on the following\r
1257         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1258         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1259 \r
1260         /* Similar to xQueueGenericSendFromISR() but used with semaphores where the\r
1261         item size is 0.  Don't directly wake a task that was blocked on a queue\r
1262         read; instead, return a flag to say whether a context switch is required or\r
1263         not (i.e. has a task with a higher priority than us been woken by this\r
1264         post). */\r
1265         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1266         {\r
1267                 /* When the queue is used to implement a semaphore, no data is ever\r
1268                 moved through the queue, but it is still valid to see if the queue\r
1269                 'has space'. */\r
1270                 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
1271                 {\r
1272                         traceQUEUE_SEND_FROM_ISR( pxQueue );\r
1273 \r
1274                         /* A task can only have an inherited priority if it is a mutex\r
1275                         holder - and if there is a mutex holder then the mutex cannot be\r
1276                         given from an ISR.  Therefore, unlike the xQueueGenericGive()\r
1277                         function, there is no need to determine the need for priority\r
1278                         disinheritance here or to clear the mutex holder TCB member. */\r
1279 \r
1280                         ++( pxQueue->uxMessagesWaiting );\r
1281 \r
1282                         /* The event list is not altered if the queue is locked.  This will\r
1283                         be done when the queue is unlocked later. */\r
1284                         if( pxQueue->xTxLock == queueUNLOCKED )\r
1285                         {\r
1286                                 #if ( configUSE_QUEUE_SETS == 1 )\r
1287                                 {\r
1288                                         if( pxQueue->pxQueueSetContainer != NULL )\r
1289                                         {\r
1290                                                 if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )\r
1291                                                 {\r
1292                                                         /* The semaphore is a member of a queue set, and\r
1293                                                         posting to the queue set caused a higher priority\r
1294                                                         task to unblock.  A context switch is required. */\r
1295                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1296                                                         {\r
1297                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1298                                                         }\r
1299                                                         else\r
1300                                                         {\r
1301                                                                 mtCOVERAGE_TEST_MARKER();\r
1302                                                         }\r
1303                                                 }\r
1304                                                 else\r
1305                                                 {\r
1306                                                         mtCOVERAGE_TEST_MARKER();\r
1307                                                 }\r
1308                                         }\r
1309                                         else\r
1310                                         {\r
1311                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1312                                                 {\r
1313                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1314                                                         {\r
1315                                                                 /* The task waiting has a higher priority so\r
1316                                                                 record that a context switch is required. */\r
1317                                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1318                                                                 {\r
1319                                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1320                                                                 }\r
1321                                                                 else\r
1322                                                                 {\r
1323                                                                         mtCOVERAGE_TEST_MARKER();\r
1324                                                                 }\r
1325                                                         }\r
1326                                                         else\r
1327                                                         {\r
1328                                                                 mtCOVERAGE_TEST_MARKER();\r
1329                                                         }\r
1330                                                 }\r
1331                                                 else\r
1332                                                 {\r
1333                                                         mtCOVERAGE_TEST_MARKER();\r
1334                                                 }\r
1335                                         }\r
1336                                 }\r
1337                                 #else /* configUSE_QUEUE_SETS */\r
1338                                 {\r
1339                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1340                                         {\r
1341                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1342                                                 {\r
1343                                                         /* The task waiting has a higher priority so record that a\r
1344                                                         context switch is required. */\r
1345                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1346                                                         {\r
1347                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1348                                                         }\r
1349                                                         else\r
1350                                                         {\r
1351                                                                 mtCOVERAGE_TEST_MARKER();\r
1352                                                         }\r
1353                                                 }\r
1354                                                 else\r
1355                                                 {\r
1356                                                         mtCOVERAGE_TEST_MARKER();\r
1357                                                 }\r
1358                                         }\r
1359                                         else\r
1360                                         {\r
1361                                                 mtCOVERAGE_TEST_MARKER();\r
1362                                         }\r
1363                                 }\r
1364                                 #endif /* configUSE_QUEUE_SETS */\r
1365                         }\r
1366                         else\r
1367                         {\r
1368                                 /* Increment the lock count so the task that unlocks the queue\r
1369                                 knows that data was posted while it was locked. */\r
1370                                 ++( pxQueue->xTxLock );\r
1371                         }\r
1372 \r
1373                         xReturn = pdPASS;\r
1374                 }\r
1375                 else\r
1376                 {\r
1377                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1378                         xReturn = errQUEUE_FULL;\r
1379                 }\r
1380         }\r
1381         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1382 \r
1383         return xReturn;\r
1384 }\r
1385 /*-----------------------------------------------------------*/\r
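/*\r
 * Example usage (an illustrative sketch): deferring interrupt work to a\r
 * task through a binary semaphore.  xWorkSemaphore and vTimerISR are\r
 * assumed names; the semphr.h macro xSemaphoreGiveFromISR() resolves to\r
 * this function for semaphores, whose item size is 0.\r
 *\r
 * void vTimerISR( void )\r
 * {\r
 * BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
 *\r
 *     xSemaphoreGiveFromISR( xWorkSemaphore, &xHigherPriorityTaskWoken );\r
 *     portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
 * }\r
 */\r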
1386 \r
1387 BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )\r
1388 {\r
1389 BaseType_t xEntryTimeSet = pdFALSE;\r
1390 TimeOut_t xTimeOut;\r
1391 int8_t *pcOriginalReadPosition;\r
1392 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1393 \r
1394         configASSERT( pxQueue );\r
1395         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1396         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )\r
1397         {\r
1398                 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );\r
1399         }\r
1400         #endif\r
1401 \r
1402         /* This function relaxes the coding standard somewhat to allow return\r
1403         statements within the function itself.  This is done in the interest\r
1404         of execution time efficiency. */\r
1405 \r
1406         for( ;; )\r
1407         {\r
1408                 taskENTER_CRITICAL();\r
1409                 {\r
1410                         /* Is there data in the queue now?  To be running, the calling task\r
1411                         must be the highest priority task wanting to access the queue. */\r
1412                         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1413                         {\r
1414                                 /* Remember the read position in case the queue is only being\r
1415                                 peeked. */\r
1416                                 pcOriginalReadPosition = pxQueue->u.pcReadFrom;\r
1417 \r
1418                                 prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1419 \r
1420                                 if( xJustPeeking == pdFALSE )\r
1421                                 {\r
1422                                         traceQUEUE_RECEIVE( pxQueue );\r
1423 \r
1424                                         /* Actually removing data, not just peeking. */\r
1425                                         --( pxQueue->uxMessagesWaiting );\r
1426 \r
1427                                         #if ( configUSE_MUTEXES == 1 )\r
1428                                         {\r
1429                                                 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1430                                                 {\r
1431                                                         /* Record the information required to implement\r
1432                                                         priority inheritance should it become necessary. */\r
1433                                                         pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */\r
1434                                                 }\r
1435                                                 else\r
1436                                                 {\r
1437                                                         mtCOVERAGE_TEST_MARKER();\r
1438                                                 }\r
1439                                         }\r
1440                                         #endif /* configUSE_MUTEXES */\r
1441 \r
1442                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1443                                         {\r
1444                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )\r
1445                                                 {\r
1446                                                         queueYIELD_IF_USING_PREEMPTION();\r
1447                                                 }\r
1448                                                 else\r
1449                                                 {\r
1450                                                         mtCOVERAGE_TEST_MARKER();\r
1451                                                 }\r
1452                                         }\r
1453                                         else\r
1454                                         {\r
1455                                                 mtCOVERAGE_TEST_MARKER();\r
1456                                         }\r
1457                                 }\r
1458                                 else\r
1459                                 {\r
1460                                         traceQUEUE_PEEK( pxQueue );\r
1461 \r
1462                                         /* The data is not being removed, so reset the read\r
1463                                         pointer. */\r
1464                                         pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
1465 \r
1466                                         /* The data is being left in the queue, so see if there are\r
1467                                         any other tasks waiting for the data. */\r
1468                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1469                                         {\r
1470                                                 /* Tasks that are removed from the event list will get added to\r
1471                                                 the ready list, or to the pending ready list if the scheduler is suspended. */\r
1472                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1473                                                 {\r
1474                                                         /* The task waiting has a higher priority than this task. */\r
1475                                                         queueYIELD_IF_USING_PREEMPTION();\r
1476                                                 }\r
1477                                                 else\r
1478                                                 {\r
1479                                                         mtCOVERAGE_TEST_MARKER();\r
1480                                                 }\r
1481                                         }\r
1482                                         else\r
1483                                         {\r
1484                                                 mtCOVERAGE_TEST_MARKER();\r
1485                                         }\r
1486                                 }\r
1487 \r
1488                                 taskEXIT_CRITICAL();\r
1489                                 return pdPASS;\r
1490                         }\r
1491                         else\r
1492                         {\r
1493                                 if( xTicksToWait == ( TickType_t ) 0 )\r
1494                                 {\r
1495                                         /* The queue was empty and no block time is specified (or\r
1496                                         the block time has expired) so leave now. */\r
1497                                         taskEXIT_CRITICAL();\r
1498                                         traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1499                                         return errQUEUE_EMPTY;\r
1500                                 }\r
1501                                 else if( xEntryTimeSet == pdFALSE )\r
1502                                 {\r
1503                                         /* The queue was empty and a block time was specified so\r
1504                                         configure the timeout structure. */\r
1505                                         vTaskSetTimeOutState( &xTimeOut );\r
1506                                         xEntryTimeSet = pdTRUE;\r
1507                                 }\r
1508                                 else\r
1509                                 {\r
1510                                         /* Entry time was already set. */\r
1511                                         mtCOVERAGE_TEST_MARKER();\r
1512                                 }\r
1513                         }\r
1514                 }\r
1515                 taskEXIT_CRITICAL();\r
1516 \r
1517                 /* Interrupts and other tasks can send to and receive from the queue\r
1518                 now that the critical section has been exited. */\r
1519 \r
1520                 vTaskSuspendAll();\r
1521                 prvLockQueue( pxQueue );\r
1522 \r
1523                 /* Update the timeout state to see if it has expired yet. */\r
1524                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )\r
1525                 {\r
1526                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1527                         {\r
1528                                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );\r
1529 \r
1530                                 #if ( configUSE_MUTEXES == 1 )\r
1531                                 {\r
1532                                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1533                                         {\r
1534                                                 taskENTER_CRITICAL();\r
1535                                                 {\r
1536                                                         vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );\r
1537                                                 }\r
1538                                                 taskEXIT_CRITICAL();\r
1539                                         }\r
1540                                         else\r
1541                                         {\r
1542                                                 mtCOVERAGE_TEST_MARKER();\r
1543                                         }\r
1544                                 }\r
1545                                 #endif\r
1546 \r
1547                                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );\r
1548                                 prvUnlockQueue( pxQueue );\r
1549                                 if( xTaskResumeAll() == pdFALSE )\r
1550                                 {\r
1551                                         portYIELD_WITHIN_API();\r
1552                                 }\r
1553                                 else\r
1554                                 {\r
1555                                         mtCOVERAGE_TEST_MARKER();\r
1556                                 }\r
1557                         }\r
1558                         else\r
1559                         {\r
1560                                 /* Try again. */\r
1561                                 prvUnlockQueue( pxQueue );\r
1562                                 ( void ) xTaskResumeAll();\r
1563                         }\r
1564                 }\r
1565                 else\r
1566                 {\r
1567                         prvUnlockQueue( pxQueue );\r
1568                         ( void ) xTaskResumeAll();\r
1569                         traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1570                         return errQUEUE_EMPTY;\r
1571                 }\r
1572         }\r
1573 }\r
1574 /*-----------------------------------------------------------*/\r
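/*\r
 * Example usage (an illustrative sketch): a task blocking on a queue.\r
 * xMessageQueue, Message_t and prvProcessMessage() are assumed names;\r
 * applications normally call this function through the xQueueReceive()\r
 * and xQueuePeek() macros, which pass pdFALSE and pdTRUE respectively\r
 * for the xJustPeeking parameter.\r
 *\r
 * Message_t xMessage;\r
 *\r
 * for( ;; )\r
 * {\r
 *     // Block for up to 100ms waiting for a message to arrive.\r
 *     if( xQueueReceive( xMessageQueue, &xMessage, pdMS_TO_TICKS( 100 ) ) == pdPASS )\r
 *     {\r
 *         prvProcessMessage( &xMessage );\r
 *     }\r
 * }\r
 */\r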
1575 \r
1576 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )\r
1577 {\r
1578 BaseType_t xReturn;\r
1579 UBaseType_t uxSavedInterruptStatus;\r
1580 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1581 \r
1582         configASSERT( pxQueue );\r
1583         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1584 \r
1585         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1586         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1587         above the maximum system call priority are kept permanently enabled, even\r
1588         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1589         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1590         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1591         failure if a FreeRTOS API function is called from an interrupt that has been\r
1592         assigned a priority above the configured maximum system call priority.\r
1593         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1594         that have been assigned a priority at or (logically) below the maximum\r
1595         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1596         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1597         More information (albeit Cortex-M specific) is provided on the following\r
1598         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1599         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1600 \r
1601         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1602         {\r
1603                 /* Cannot block in an ISR, so check there is data available. */\r
1604                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1605                 {\r
1606                         traceQUEUE_RECEIVE_FROM_ISR( pxQueue );\r
1607 \r
1608                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1609                         --( pxQueue->uxMessagesWaiting );\r
1610 \r
1611                         /* If the queue is locked the event list will not be modified.\r
1612                         Instead update the lock count so the task that unlocks the queue\r
1613                         will know that an ISR has removed data while the queue was\r
1614                         locked. */\r
1615                         if( pxQueue->xRxLock == queueUNLOCKED )\r
1616                         {\r
1617                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1618                                 {\r
1619                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1620                                         {\r
1621                                                 /* The task waiting has a higher priority than us so\r
1622                                                 force a context switch. */\r
1623                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1624                                                 {\r
1625                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1626                                                 }\r
1627                                                 else\r
1628                                                 {\r
1629                                                         mtCOVERAGE_TEST_MARKER();\r
1630                                                 }\r
1631                                         }\r
1632                                         else\r
1633                                         {\r
1634                                                 mtCOVERAGE_TEST_MARKER();\r
1635                                         }\r
1636                                 }\r
1637                                 else\r
1638                                 {\r
1639                                         mtCOVERAGE_TEST_MARKER();\r
1640                                 }\r
1641                         }\r
1642                         else\r
1643                         {\r
1644                                 /* Increment the lock count so the task that unlocks the queue\r
1645                                 knows that data was removed while it was locked. */\r
1646                                 ++( pxQueue->xRxLock );\r
1647                         }\r
1648 \r
1649                         xReturn = pdPASS;\r
1650                 }\r
1651                 else\r
1652                 {\r
1653                         xReturn = pdFAIL;\r
1654                         traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );\r
1655                 }\r
1656         }\r
1657         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1658 \r
1659         return xReturn;\r
1660 }\r
1661 /*-----------------------------------------------------------*/\r
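/*\r
 * Example usage (an illustrative sketch): draining a queue from within\r
 * an interrupt handler.  xCommandQueue and vProcessCommand() are assumed\r
 * names used only for the example.\r
 *\r
 * void vCommandISR( void )\r
 * {\r
 * uint8_t ucCommand;\r
 * BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
 *\r
 *     // pdPASS is returned while items remain; an empty queue returns pdFAIL.\r
 *     while( xQueueReceiveFromISR( xCommandQueue, &ucCommand, &xHigherPriorityTaskWoken ) == pdPASS )\r
 *     {\r
 *         vProcessCommand( ucCommand );\r
 *     }\r
 *\r
 *     portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
 * }\r
 */\r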
1662 \r
1663 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,  void * const pvBuffer )\r
1664 {\r
1665 BaseType_t xReturn;\r
1666 UBaseType_t uxSavedInterruptStatus;\r
1667 int8_t *pcOriginalReadPosition;\r
1668 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1669 \r
1670         configASSERT( pxQueue );\r
1671         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1672         configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */\r
1673 \r
1674         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1675         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1676         above the maximum system call priority are kept permanently enabled, even\r
1677         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1678         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1679         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1680         failure if a FreeRTOS API function is called from an interrupt that has been\r
1681         assigned a priority above the configured maximum system call priority.\r
1682         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1683         that have been assigned a priority at or (logically) below the maximum\r
1684         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1685         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1686         More information (albeit Cortex-M specific) is provided on the following\r
1687         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1688         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1689 \r
1690         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1691         {\r
1692                 /* Cannot block in an ISR, so check there is data available. */\r
1693                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1694                 {\r
1695                         traceQUEUE_PEEK_FROM_ISR( pxQueue );\r
1696 \r
1697                         /* Remember the read position so it can be reset as nothing is\r
1698                         actually being removed from the queue. */\r
1699                         pcOriginalReadPosition = pxQueue->u.pcReadFrom;\r
1700                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1701                         pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
1702 \r
1703                         xReturn = pdPASS;\r
1704                 }\r
1705                 else\r
1706                 {\r
1707                         xReturn = pdFAIL;\r
1708                         traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );\r
1709                 }\r
1710         }\r
1711         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1712 \r
1713         return xReturn;\r
1714 }\r
1715 /*-----------------------------------------------------------*/\r
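/*\r
 * Example usage (an illustrative sketch): inspecting the next item\r
 * without removing it.  xEventQueue, Event_t and vInspectEvent() are\r
 * assumed names.\r
 *\r
 * Event_t xNextEvent;\r
 *\r
 * if( xQueuePeekFromISR( xEventQueue, &xNextEvent ) == pdPASS )\r
 * {\r
 *     // The item remains on the queue; a subsequent receive will\r
 *     // return this same item again.\r
 *     vInspectEvent( &xNextEvent );\r
 * }\r
 */\r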
1716 \r
1717 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )\r
1718 {\r
1719 UBaseType_t uxReturn;\r
1720 \r
1721         configASSERT( xQueue );\r
1722 \r
1723         taskENTER_CRITICAL();\r
1724         {\r
1725                 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1726         }\r
1727         taskEXIT_CRITICAL();\r
1728 \r
1729         return uxReturn;\r
1730 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1731 /*-----------------------------------------------------------*/\r
1732 \r
1733 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )\r
1734 {\r
1735 UBaseType_t uxReturn;\r
1736 Queue_t *pxQueue;\r
1737 \r
1738         pxQueue = ( Queue_t * ) xQueue;\r
1739         configASSERT( pxQueue );\r
1740 \r
1741         taskENTER_CRITICAL();\r
1742         {\r
1743                 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;\r
1744         }\r
1745         taskEXIT_CRITICAL();\r
1746 \r
1747         return uxReturn;\r
1748 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1749 /*-----------------------------------------------------------*/\r
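/*\r
 * Example usage (an illustrative sketch): the two query functions above\r
 * are complementary.  xQueue is an assumed handle created with a length\r
 * of 10.  Note each value is read in its own critical section, so the\r
 * identity below is only guaranteed while no other task or interrupt is\r
 * using the queue.\r
 *\r
 * UBaseType_t uxUsed, uxFree;\r
 *\r
 * uxUsed = uxQueueMessagesWaiting( xQueue );\r
 * uxFree = uxQueueSpacesAvailable( xQueue );\r
 * configASSERT( ( uxUsed + uxFree ) == ( UBaseType_t ) 10 );\r
 */\r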
1750 \r
1751 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )\r
1752 {\r
1753 UBaseType_t uxReturn;\r
1754 \r
1755         configASSERT( xQueue );\r
1756 \r
1757         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1758 \r
1759         return uxReturn;\r
1760 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1761 /*-----------------------------------------------------------*/\r
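/*\r
 * Example usage (an illustrative sketch): unlike the task level version\r
 * above, no critical section is used, so the value is a snapshot only.\r
 * xRxQueue is an assumed handle.\r
 *\r
 * if( uxQueueMessagesWaitingFromISR( xRxQueue ) == ( UBaseType_t ) 0 )\r
 * {\r
 *     // Queue drained - for example, safe to disable the receive interrupt.\r
 * }\r
 */\r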
1762 \r
1763 void vQueueDelete( QueueHandle_t xQueue )\r
1764 {\r
1765 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1766 \r
1767         configASSERT( pxQueue );\r
1768 \r
1769         traceQUEUE_DELETE( pxQueue );\r
1770         #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
1771         {\r
1772                 vQueueUnregisterQueue( pxQueue );\r
1773         }\r
1774         #endif\r
1775         vPortFree( pxQueue );\r
1776 }\r
1777 /*-----------------------------------------------------------*/\r
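/*\r
 * Example usage (an illustrative sketch): a queue's full lifecycle.\r
 * vQueueDelete() frees the queue storage with vPortFree(), so a queue\r
 * must not be deleted while tasks are still blocked on it.\r
 *\r
 * QueueHandle_t xTempQueue;\r
 *\r
 * xTempQueue = xQueueCreate( 5, sizeof( uint32_t ) );\r
 * if( xTempQueue != NULL )\r
 * {\r
 *     // ... use the queue ...\r
 *     vQueueDelete( xTempQueue );\r
 * }\r
 */\r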
1778 \r
1779 #if ( configUSE_TRACE_FACILITY == 1 )\r
1780 \r
1781         UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )\r
1782         {\r
1783                 return ( ( Queue_t * ) xQueue )->uxQueueNumber;\r
1784         }\r
1785 \r
1786 #endif /* configUSE_TRACE_FACILITY */\r
1787 /*-----------------------------------------------------------*/\r
1788 \r
1789 #if ( configUSE_TRACE_FACILITY == 1 )\r
1790 \r
1791         void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )\r
1792         {\r
1793                 ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;\r
1794         }\r
1795 \r
1796 #endif /* configUSE_TRACE_FACILITY */\r
1797 /*-----------------------------------------------------------*/\r
1798 \r
1799 #if ( configUSE_TRACE_FACILITY == 1 )\r
1800 \r
1801         uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )\r
1802         {\r
1803                 return ( ( Queue_t * ) xQueue )->ucQueueType;\r
1804         }\r
1805 \r
1806 #endif /* configUSE_TRACE_FACILITY */\r
1807 /*-----------------------------------------------------------*/\r
1808 \r
1809 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )\r
1810 {\r
1811 BaseType_t xReturn = pdFALSE;\r
1812 \r
1813         if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )\r
1814         {\r
1815                 #if ( configUSE_MUTEXES == 1 )\r
1816                 {\r
1817                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1818                         {\r
1819                                 /* The mutex is no longer being held. */\r
1820                                 xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );\r
1821                                 pxQueue->pxMutexHolder = NULL;\r
1822                         }\r
1823                         else\r
1824                         {\r
1825                                 mtCOVERAGE_TEST_MARKER();\r
1826                         }\r
1827                 }\r
1828                 #endif /* configUSE_MUTEXES */\r
1829         }\r
1830         else if( xPosition == queueSEND_TO_BACK )\r
1831         {\r
1832                 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */\r
1833                 pxQueue->pcWriteTo += pxQueue->uxItemSize;\r
1834                 if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */\r
1835                 {\r
1836                         pxQueue->pcWriteTo = pxQueue->pcHead;\r
1837                 }\r
1838                 else\r
1839                 {\r
1840                         mtCOVERAGE_TEST_MARKER();\r
1841                 }\r
1842         }\r
1843         else\r
1844         {\r
1845                 ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
1846                 pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;\r
1847                 if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */\r
1848                 {\r
1849                         pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );\r
1850                 }\r
1851                 else\r
1852                 {\r
1853                         mtCOVERAGE_TEST_MARKER();\r
1854                 }\r
1855 \r
1856                 if( xPosition == queueOVERWRITE )\r
1857                 {\r
1858                         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1859                         {\r
1860                                 /* An item is not being added but overwritten, so subtract\r
1861                                 one from the recorded number of items in the queue; the\r
1862                                 count is incremented again below, so the recorded number of\r
1863                                 items remains correct. */\r
1864                                 --( pxQueue->uxMessagesWaiting );\r
1865                         }\r
1866                         else\r
1867                         {\r
1868                                 mtCOVERAGE_TEST_MARKER();\r
1869                         }\r
1870                 }\r
1871                 else\r
1872                 {\r
1873                         mtCOVERAGE_TEST_MARKER();\r
1874                 }\r
1875         }\r
1876 \r
1877         ++( pxQueue->uxMessagesWaiting );\r
1878 \r
1879         return xReturn;\r
1880 }\r
1881 /*-----------------------------------------------------------*/\r
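/*\r
 * Worked example (illustrative, assuming a queue created with uxLength\r
 * of 3 and uxItemSize of 4): pcHead points at the start of the 12 byte\r
 * storage area and pcTail one byte past its end.  Each queueSEND_TO_BACK\r
 * copy advances pcWriteTo by one item:\r
 *\r
 *     pcWriteTo = pcHead        before the first send\r
 *     pcWriteTo = pcHead + 4    after the first send\r
 *     pcWriteTo = pcHead + 8    after the second send\r
 *     pcWriteTo = pcHead        after the third send, as pcWriteTo\r
 *                               reached pcTail and wrapped\r
 *\r
 * queueSEND_TO_FRONT instead moves u.pcReadFrom backwards by one item,\r
 * wrapping from below pcHead to pcTail - uxItemSize.\r
 */\r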
1882 \r
1883 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )\r
1884 {\r
1885         if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )\r
1886         {\r
1887                 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
1888                 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */\r
1889                 {\r
1890                         pxQueue->u.pcReadFrom = pxQueue->pcHead;\r
1891                 }\r
1892                 else\r
1893                 {\r
1894                         mtCOVERAGE_TEST_MARKER();\r
1895                 }\r
1896                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */\r
1897         }\r
1898 }\r
1899 /*-----------------------------------------------------------*/\r
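/*\r
 * Worked example (illustrative, same 3 item x 4 byte queue as above):\r
 * u.pcReadFrom points at the item most recently read, not the next one,\r
 * which is why it is advanced and wrapped before the memcpy() above:\r
 *\r
 *     u.pcReadFrom = pcHead + 8   initial value set when the queue\r
 *                                 is created or reset\r
 *     u.pcReadFrom = pcHead       after reading the first item (wrapped)\r
 *     u.pcReadFrom = pcHead + 4   after reading the second item\r
 *\r
 * Peeking relies on this layout: xQueueGenericReceive() saves the\r
 * pointer in pcOriginalReadPosition and restores it to 'un-read' the\r
 * item.\r
 */\r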
1900 \r
1901 static void prvUnlockQueue( Queue_t * const pxQueue )\r
1902 {\r
1903         /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */\r
1904 \r
1905         /* The lock counts contain the number of extra data items placed or\r
1906         removed from the queue while the queue was locked.  When a queue is\r
1907         locked items can be added or removed, but the event lists cannot be\r
1908         updated. */\r
1909         taskENTER_CRITICAL();\r
1910         {\r
1911                 /* See if data was added to the queue while it was locked. */\r
1912                 while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )\r
1913                 {\r
1914                         /* Data was posted while the queue was locked.  Are any tasks\r
1915                         blocked waiting for data to become available? */\r
1916                         #if ( configUSE_QUEUE_SETS == 1 )\r
1917                         {\r
1918                                 if( pxQueue->pxQueueSetContainer != NULL )\r
1919                                 {\r
1920                                         if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )\r
1921                                         {\r
1922                                                 /* The queue is a member of a queue set, and posting to\r
1923                                                 the queue set caused a higher priority task to unblock.\r
1924                                                 A context switch is required. */\r
1925                                                 vTaskMissedYield();\r
1926                                         }\r
1927                                         else\r
1928                                         {\r
1929                                                 mtCOVERAGE_TEST_MARKER();\r
1930                                         }\r
1931                                 }\r
1932                                 else\r
1933                                 {\r
1934                                         /* Tasks that are removed from the event list will get added to\r
1935                                         the pending ready list as the scheduler is still suspended. */\r
1936                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1937                                         {\r
1938                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1939                                                 {\r
1940                                                         /* The task waiting has a higher priority so record that a\r
1941                                                         context switch is required. */\r
1942                                                         vTaskMissedYield();\r
1943                                                 }\r
1944                                                 else\r
1945                                                 {\r
1946                                                         mtCOVERAGE_TEST_MARKER();\r
1947                                                 }\r
1948                                         }\r
1949                                         else\r
1950                                         {\r
1951                                                 break;\r
1952                                         }\r
1953                                 }\r
1954                         }\r
1955                         #else /* configUSE_QUEUE_SETS */\r
1956                         {\r
1957                                 /* Tasks that are removed from the event list will get added to\r
1958                                 the pending ready list as the scheduler is still suspended. */\r
1959                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1960                                 {\r
1961                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1962                                         {\r
1963                                                 /* The task waiting has a higher priority so record that a\r
1964                                                 context switch is required. */\r
1965                                                 vTaskMissedYield();\r
1966                                         }\r
1967                                         else\r
1968                                         {\r
1969                                                 mtCOVERAGE_TEST_MARKER();\r
1970                                         }\r
1971                                 }\r
1972                                 else\r
1973                                 {\r
1974                                         break;\r
1975                                 }\r
1976                         }\r
1977                         #endif /* configUSE_QUEUE_SETS */\r
1978 \r
1979                         --( pxQueue->xTxLock );\r
1980                 }\r
1981 \r
1982                 pxQueue->xTxLock = queueUNLOCKED;\r
1983         }\r
1984         taskEXIT_CRITICAL();\r
1985 \r
1986         /* Do the same for the Rx lock. */\r
1987         taskENTER_CRITICAL();\r
1988         {\r
1989                 while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )\r
1990                 {\r
1991                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1992                         {\r
1993                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1994                                 {\r
1995                                         vTaskMissedYield();\r
1996                                 }\r
1997                                 else\r
1998                                 {\r
1999                                         mtCOVERAGE_TEST_MARKER();\r
2000                                 }\r
2001 \r
2002                                 --( pxQueue->xRxLock );\r
2003                         }\r
2004                         else\r
2005                         {\r
2006                                 break;\r
2007                         }\r
2008                 }\r
2009 \r
2010                 pxQueue->xRxLock = queueUNLOCKED;\r
2011         }\r
2012         taskEXIT_CRITICAL();\r
2013 }\r
2014 /*-----------------------------------------------------------*/\r
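/* prvLockQueue() and prvUnlockQueue() always appear as a pair around code
that may block on the queue.  A simplified sketch of the calling pattern used
by the blocking send/receive paths elsewhere in this file (illustrative
only, not a complete implementation):

	vTaskSuspendAll();
	prvLockQueue( pxQueue );
	if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
	{
		vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
	}
	prvUnlockQueue( pxQueue );
	( void ) xTaskResumeAll();

While the queue is locked an ISR can still add or remove items, but the
event lists are left untouched; the lock counts record how much of that
deferred work prvUnlockQueue() has to complete. */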

static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/
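/* Unlike the prvIsQueue...() versions, the two FromISR query functions above
do not use a critical section: each performs a single read of
uxMessagesWaiting, which is assumed to be atomic on the port in use.  An
illustrative use from an interrupt handler (xExampleQueue and xData are
assumed to be defined by the application; portYIELD_FROM_ISR() is available
on most but not all ports):

	void vExampleISR( void )
	{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;

		if( xQueueIsQueueFullFromISR( xExampleQueue ) == pdFALSE )
		{
			( void ) xQueueSendFromISR( xExampleQueue, &xData, &xHigherPriorityTaskWoken );
		}
		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}
*/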

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a co-routine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
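/* xQueueCRSend() is not normally called directly; application code uses the
crQUEUE_SEND() wrapper from croutine.h, which handles the errQUEUE_BLOCKED
and errQUEUE_YIELD return values.  A sketch of a producer co-routine - all
names are illustrative, and note that locals must be static because a
co-routine does not retain its stack across a block:

	static void prvProducerCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
	{
	static BaseType_t xResult;
	static UBaseType_t uxValueToPost = 0;

		crSTART( xHandle );

		for( ;; )
		{
			crQUEUE_SEND( xHandle, xCoRoutineQueue, &uxValueToPost, 0, &xResult );
			uxValueToPost++;
		}

		crEND();
	}
*/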

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
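/* The matching consumer side, again via the croutine.h wrapper rather than a
direct call.  xCoRoutineQueue is the same illustrative handle as in the
producer sketch above:

	static void prvConsumerCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
	{
	static BaseType_t xResult;
	static UBaseType_t uxReceivedValue, uxItemsProcessed = 0;

		crSTART( xHandle );

		for( ;; )
		{
			crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &uxReceivedValue, ( TickType_t ) 10, &xResult );
			if( xResult == pdPASS )
			{
				uxItemsProcessed++;
			}
		}

		crEND();
	}
*/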

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
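/* Sketch of use from an ISR via the crQUEUE_SEND_FROM_ISR() wrapper in
croutine.h.  The woken flag is threaded through each call so that only one
co-routine is woken per interrupt, even if the ISR posts several items
(xCoRoutineQueue and cByteToPost are illustrative names):

	void vTxISR( void )
	{
	BaseType_t xCoRoutineWoken = pdFALSE;
	char cByteToPost = 'a';

		xCoRoutineWoken = crQUEUE_SEND_FROM_ISR( xCoRoutineQueue, &cByteToPost, xCoRoutineWoken );
	}
*/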

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available.  If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
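/* The receive direction passes the woken flag by pointer instead of
returning it: crQUEUE_RECEIVE_FROM_ISR( pxQueue, pvBuffer, pxCoRoutineWoken )
returns pdPASS while items remain to be drained.  Illustrative only;
vProcessByte() and xCoRoutineQueue are assumed to be application-defined:

	void vRxISR( void )
	{
	BaseType_t xCoRoutineWoken = pdFALSE;
	char cByte;

		while( crQUEUE_RECEIVE_FROM_ISR( xCoRoutineQueue, &cByte, &xCoRoutineWoken ) == pdPASS )
		{
			vProcessByte( cByte );
		}
	}
*/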

#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
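/* The registry exists purely to make queues and semaphores visible to a
kernel-aware debugger; it has no effect on scheduling.  Typical use is
immediately after creation.  Note that only the name pointer is stored, so
the string must remain valid for as long as the queue is registered:

	xQueue = xQueueCreate( 10, sizeof( uint32_t ) );
	vQueueAddToRegistry( xQueue, "MainMsgQueue" );
*/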

#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered is actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot is free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configUSE_TIMERS == 1 )

	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code, hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
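/* A queue set is itself just a queue of Queue_t pointers, which is why
xQueueGenericCreate() can be reused above.  uxEventQueueLength must be large
enough to hold one event per item in every member: for example, a set that
will contain two queues of length 5 and one binary semaphore needs a length
of 5 + 5 + 1 = 11.

	xQueueSet = xQueueCreateSet( 5 + 5 + 1 );
*/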

#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xReturn = pdFAIL;
			}
			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xReturn = pdFAIL;
			}
			else
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
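/* Putting the set API together - a sketch of the usual pattern, in which the
handle returned by xQueueSelectFromSet() identifies the member that contains
data, and that member is then read with a zero block time because the event
guarantees data is present.  All names are illustrative:

	( void ) xQueueAddToSet( xQueue1, xQueueSet );
	( void ) xQueueAddToSet( xQueue2, xQueueSet );

	for( ;; )
	{
		QueueSetMemberHandle_t xActivated = xQueueSelectFromSet( xQueueSet, portMAX_DELAY );

		if( xActivated == xQueue1 )
		{
			( void ) xQueueReceive( xQueue1, &xValue1, 0 );
		}
		else if( xActivated == xQueue2 )
		{
			( void ) xQueueReceive( xQueue2, &xValue2, 0 );
		}
	}
*/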

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called from a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );
			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
				{
					/* The task waiting has a higher priority. */
					xReturn = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */