]> git.sur5r.net Git - freertos/blob - FreeRTOS/Source/queue.c
Minor updates to ensure all kernel aware debuggers are happy with V8.
[freertos] / FreeRTOS / Source / queue.c
1 /*\r
2     FreeRTOS V8.0.0 - Copyright (C) 2014 Real Time Engineers Ltd.\r
3     All rights reserved\r
4 \r
5     VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.\r
6 \r
7     ***************************************************************************\r
8      *                                                                       *\r
9      *    FreeRTOS provides completely free yet professionally developed,    *\r
10      *    robust, strictly quality controlled, supported, and cross          *\r
11      *    platform software that has become a de facto standard.             *\r
12      *                                                                       *\r
13      *    Help yourself get started quickly and support the FreeRTOS         *\r
14      *    project by purchasing a FreeRTOS tutorial book, reference          *\r
15      *    manual, or both from: http://www.FreeRTOS.org/Documentation        *\r
16      *                                                                       *\r
17      *    Thank you!                                                         *\r
18      *                                                                       *\r
19     ***************************************************************************\r
20 \r
21     This file is part of the FreeRTOS distribution.\r
22 \r
23     FreeRTOS is free software; you can redistribute it and/or modify it under\r
24     the terms of the GNU General Public License (version 2) as published by the\r
25     Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.\r
26 \r
27     >>! NOTE: The modification to the GPL is included to allow you to distribute\r
28     >>! a combined work that includes FreeRTOS without being obliged to provide\r
29     >>! the source code for proprietary components outside of the FreeRTOS\r
30     >>! kernel.\r
31 \r
32     FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY\r
33     WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\r
34     FOR A PARTICULAR PURPOSE.  Full license text is available from the following\r
35     link: http://www.freertos.org/a00114.html\r
36 \r
37     1 tab == 4 spaces!\r
38 \r
39     ***************************************************************************\r
40      *                                                                       *\r
41      *    Having a problem?  Start by reading the FAQ "My application does   *\r
42      *    not run, what could be wrong?"                                     *\r
43      *                                                                       *\r
44      *    http://www.FreeRTOS.org/FAQHelp.html                               *\r
45      *                                                                       *\r
46     ***************************************************************************\r
47 \r
48     http://www.FreeRTOS.org - Documentation, books, training, latest versions,\r
49     license and Real Time Engineers Ltd. contact details.\r
50 \r
51     http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,\r
52     including FreeRTOS+Trace - an indispensable productivity tool, a DOS\r
53     compatible FAT file system, and our tiny thread aware UDP/IP stack.\r
54 \r
55     http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High\r
56     Integrity Systems to sell under the OpenRTOS brand.  Low cost OpenRTOS\r
57     licenses offer ticketed support, indemnification and middleware.\r
58 \r
59     http://www.SafeRTOS.com - High Integrity Systems also provide a safety\r
60     engineered and independently SIL3 certified version for use in safety and\r
61     mission critical applications that require provable dependability.\r
62 \r
63     1 tab == 4 spaces!\r
64 */\r
65 \r
66 #include <stdlib.h>\r
67 #include <string.h>\r
68 \r
69 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining\r
70 all the API functions to use the MPU wrappers.  That should only be done when\r
71 task.h is included from an application file. */\r
72 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE\r
73 \r
74 #include "FreeRTOS.h"\r
75 #include "task.h"\r
76 #include "queue.h"\r
77 \r
78 #if ( configUSE_CO_ROUTINES == 1 )\r
79         #include "croutine.h"\r
80 #endif\r
81 \r
82 /* Lint e961 and e750 are suppressed as a MISRA exception justified because the\r
83 MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the\r
84 header files above, but not in this file, in order to generate the correct\r
85 privileged Vs unprivileged linkage and placement. */\r
86 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */\r
87 \r
88 \r
/* Constants used with the xRxLock and xTxLock structure members.  A value of
queueUNLOCKED means an ISR may remove tasks from the queue's event lists;
any value >= queueLOCKED_UNMODIFIED counts the items added/removed by ISRs
while the queue was locked. */
#define queueUNLOCKED					( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
120 \r
121 /*\r
122  * Definition of the queue used by the scheduler.\r
123  * Items are queued by copy, not reference.\r
124  */\r
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  The same structure also backs
 * semaphores and mutexes - see the pxMutexHolder/uxQueueType macro mapping
 * above for how pcHead/pcTail are reused in the mutex case.
 */
typedef struct QueueDefinition
{
	int8_t *pcHead;					/*< Points to the beginning of the queue storage area. */
	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
	int8_t *pcWriteTo;				/*< Points to the free next place in the storage area. */

	union							/* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
	{
		int8_t *pcReadFrom;			/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
		UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
	} u;

	List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
	List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

	volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
	UBaseType_t uxItemSize;			/*< The size of each item that the queue will hold. */

	volatile BaseType_t xRxLock;	/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
	volatile BaseType_t xTxLock;	/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

	#if ( configUSE_TRACE_FACILITY == 1 )
		UBaseType_t uxQueueNumber;	/*< Used by kernel aware debuggers and trace tools only. */
		uint8_t ucQueueType;		/*< Records whether this is a queue, mutex, semaphore, etc. for trace tools. */
	#endif

	#if ( configUSE_QUEUE_SETS == 1 )
		struct QueueDefinition *pxQueueSetContainer; /*< The queue set this queue belongs to, or NULL. */
	#endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;
161 \r
162 /*-----------------------------------------------------------*/\r
163 \r
164 /*\r
165  * The queue registry is just a means for kernel aware debuggers to locate\r
166  * queue structures.  It has no other purpose so is an optional component.\r
167  */\r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} xQueueRegistryItem;

	/* The old xQueueRegistryItem name is maintained above then typedefed to
	the new QueueRegistryItem_t name below to enable the use of older kernel
	aware debuggers. */
	typedef xQueueRegistryItem QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
190 \r
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE;
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
	/*
	 * Checks to see if a queue is a member of a queue set, and if so, notifies
	 * the queue set that the queue contains data.
	 */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif
233 \r
234 /*-----------------------------------------------------------*/\r
235 \r
236 /*\r
237  * Macro to mark a queue as locked.  Locking a queue prevents an ISR from\r
238  * accessing the queue event lists.\r
239  */\r
#define prvLockQueue( pxQueue )										\
	taskENTER_CRITICAL();											\
	{																\
		/* Only transition from queueUNLOCKED - a count already		\
		accrued while locked must not be discarded. */				\
		if( ( pxQueue )->xRxLock == queueUNLOCKED )					\
		{															\
			( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;			\
		}															\
		if( ( pxQueue )->xTxLock == queueUNLOCKED )					\
		{															\
			( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;			\
		}															\
	}																\
	taskEXIT_CRITICAL()
253 /*-----------------------------------------------------------*/\r
254 \r
/*
 * Return a queue to its original empty state.  If xNewQueue is pdTRUE the
 * queue is being reset as part of its creation, so the event lists are also
 * initialised; otherwise any task blocked waiting to send is given a chance
 * to run because the queue is about to become writable again.  Always
 * returns pdPASS (the return value exists only for calling-semantic
 * consistency with earlier kernel versions).
 */
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		/* pcTail marks one byte past the end of the storage area, the next
		write goes to the start of the storage, and pcReadFrom points at the
		last item slot so the first read wraps round to the first item. */
		pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
		pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
		pxQueue->pcWriteTo = pxQueue->pcHead;
		pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
		pxQueue->xRxLock = queueUNLOCKED;
		pxQueue->xTxLock = queueUNLOCKED;

		if( xNewQueue == pdFALSE )
		{
			/* If there are tasks blocked waiting to read from the queue, then
			the tasks will remain blocked as after this function exits the queue
			will still be empty.  If there are tasks blocked waiting to write to
			the queue, then one should be unblocked as after this function exits
			it will be possible to write to it. */
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
				{
					/* The unblocked task has a higher priority than the
					running task, so yield if preemption is enabled. */
					queueYIELD_IF_USING_PREEMPTION();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			/* Ensure the event queues start in the correct state. */
			vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
		}
	}
	taskEXIT_CRITICAL();

	/* A value is returned for calling semantic consistency with previous
	versions. */
	return pdPASS;
}
306 /*-----------------------------------------------------------*/\r
307 \r
308 QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )\r
309 {\r
310 Queue_t *pxNewQueue;\r
311 size_t xQueueSizeInBytes;\r
312 QueueHandle_t xReturn = NULL;\r
313 \r
314         /* Remove compiler warnings about unused parameters should\r
315         configUSE_TRACE_FACILITY not be set to 1. */\r
316         ( void ) ucQueueType;\r
317 \r
318         /* Allocate the new queue structure. */\r
319         if( uxQueueLength > ( UBaseType_t ) 0 )\r
320         {\r
321                 pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );\r
322                 if( pxNewQueue != NULL )\r
323                 {\r
324                         /* Create the list of pointers to queue items.  The queue is one byte\r
325                         longer than asked for to make wrap checking easier/faster. */\r
326                         xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
327 \r
328                         pxNewQueue->pcHead = ( int8_t * ) pvPortMalloc( xQueueSizeInBytes );\r
329                         if( pxNewQueue->pcHead != NULL )\r
330                         {\r
331                                 /* Initialise the queue members as described above where the\r
332                                 queue type is defined. */\r
333                                 pxNewQueue->uxLength = uxQueueLength;\r
334                                 pxNewQueue->uxItemSize = uxItemSize;\r
335                                 ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );\r
336 \r
337                                 #if ( configUSE_TRACE_FACILITY == 1 )\r
338                                 {\r
339                                         pxNewQueue->ucQueueType = ucQueueType;\r
340                                 }\r
341                                 #endif /* configUSE_TRACE_FACILITY */\r
342 \r
343                                 #if( configUSE_QUEUE_SETS == 1 )\r
344                                 {\r
345                                         pxNewQueue->pxQueueSetContainer = NULL;\r
346                                 }\r
347                                 #endif /* configUSE_QUEUE_SETS */\r
348 \r
349                                 traceQUEUE_CREATE( pxNewQueue );\r
350                                 xReturn = pxNewQueue;\r
351                         }\r
352                         else\r
353                         {\r
354                                 traceQUEUE_CREATE_FAILED( ucQueueType );\r
355                                 vPortFree( pxNewQueue );\r
356                         }\r
357                 }\r
358                 else\r
359                 {\r
360                         mtCOVERAGE_TEST_MARKER();\r
361                 }\r
362         }\r
363         else\r
364         {\r
365                 mtCOVERAGE_TEST_MARKER();\r
366         }\r
367 \r
368         configASSERT( xReturn );\r
369 \r
370         return xReturn;\r
371 }\r
372 /*-----------------------------------------------------------*/\r
373 \r
#if ( configUSE_MUTEXES == 1 )

	/*
	 * Create a mutex on top of the generic queue mechanism.  A mutex is a
	 * queue of length 1 with an item size of 0; no storage area is allocated.
	 * The mutex is created in the 'available' state by the xQueueGenericSend()
	 * call at the end.  Returns the new handle, or NULL on allocation failure.
	 */
	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		/* Allocate the new queue structure. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
		if( pxNewQueue != NULL )
		{
			/* Information required for priority inheritance.  Note that
			pxMutexHolder maps onto pcTail and uxQueueType onto pcHead - see
			the comment where those macros are defined. */
			pxNewQueue->pxMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* Queues used as a mutex no data is actually copied into or out
			of the queue. */
			pxNewQueue->pcWriteTo = NULL;
			pxNewQueue->u.pcReadFrom = NULL;

			/* Each mutex has a length of 1 (like a binary semaphore) and
			an item size of 0 as nothing is actually copied into or out
			of the mutex. */
			pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
			pxNewQueue->uxLength = ( UBaseType_t ) 1U;
			pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
			pxNewQueue->xRxLock = queueUNLOCKED;
			pxNewQueue->xTxLock = queueUNLOCKED;

			#if ( configUSE_TRACE_FACILITY == 1 )
			{
				pxNewQueue->ucQueueType = ucQueueType;
			}
			#endif

			#if ( configUSE_QUEUE_SETS == 1 )
			{
				pxNewQueue->pxQueueSetContainer = NULL;
			}
			#endif

			/* Ensure the event queues start with the correct state. */
			vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state - the 'send'
			makes the newly created mutex immediately available to take. */
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			traceCREATE_MUTEX_FAILED();
		}

		configASSERT( pxNewQueue );
		return pxNewQueue;
	}

#endif /* configUSE_MUTEXES */
437 /*-----------------------------------------------------------*/\r
438 \r
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	/*
	 * Return the handle of the task that currently holds the mutex, or NULL
	 * if the handle does not refer to a mutex or the mutex is not held.
	 */
	void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
	{
	void *pxReturn;
	Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore;

		/* This function is called by xSemaphoreGetMutexHolder(), and should not
		be called directly.  Note:  This is a good way of determining if the
		calling task is the mutex holder, but not a good way of determining the
		identity of the mutex holder, as the holder may change between the
		following critical section exiting and the function returning. */
		taskENTER_CRITICAL();
		{
			/* Only a structure being used as a mutex has a meaningful
			pxMutexHolder member. */
			pxReturn = ( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX ) ? ( void * ) pxSemaphore->pxMutexHolder : NULL;
		}
		taskEXIT_CRITICAL();

		return pxReturn;
	}

#endif
467 /*-----------------------------------------------------------*/\r
468 \r
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	/*
	 * Give a recursive mutex previously taken with xQueueTakeMutexRecursive().
	 * The mutex is only actually returned (and any waiting task unblocked)
	 * when the recursive call count unwinds to zero.  Returns pdPASS if the
	 * calling task holds the mutex, otherwise pdFAIL.
	 */
	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then pxMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		pxMutexHolder can never coincidentally equal the tasks handle, and as
		this is the only condition we are interested in it does not matter if
		pxMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the pxMutexHolder variable. */
		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.uxRecursiveCallCount )--;

			/* Have we unwound the call count? */
			if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			/* We cannot give the mutex because we are not the holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
521 /*-----------------------------------------------------------*/\r
522 \r
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	/*
	 * Take a recursive mutex.  If the calling task already holds the mutex
	 * the recursion count is simply incremented; otherwise the underlying
	 * queue is received from, blocking for up to xTicksToWait ticks.
	 * Returns pdPASS on success, otherwise the failure code from
	 * xQueueGenericReceive().
	 */
	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
	{
	BaseType_t xResult;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* See xQueueGiveMutexRecursive() for an explanation of why no mutual
		exclusion is needed when testing pxMutexHolder. */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->pxMutexHolder != ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
		{
			/* Not currently the holder - attempt to obtain the underlying
			queue, possibly blocking. */
			xResult = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

			/* pdPASS is only returned if the mutex was successfully obtained -
			the call above may have blocked before reaching this point. */
			if( xResult == pdPASS )
			{
				( pxMutex->u.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}
		else
		{
			/* Already the holder - just increment the recursion depth. */
			( pxMutex->u.uxRecursiveCallCount )++;
			xResult = pdPASS;
		}

		return xResult;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
562 /*-----------------------------------------------------------*/\r
563 \r
#if ( configUSE_COUNTING_SEMAPHORES == 1 )

	/*
	 * Create a counting semaphore as a queue of uxMaxCount zero-sized items,
	 * with the count seeded to uxInitialCount.  Returns the new handle, or
	 * NULL on allocation failure.
	 */
	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
	{
	QueueHandle_t xSemaphore;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		/* The semaphore count is represented by the number of (empty) items
		held in the queue. */
		xSemaphore = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xSemaphore == NULL )
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}
		else
		{
			/* Seed the count directly rather than 'giving' the semaphore
			uxInitialCount times. */
			( ( Queue_t * ) xSemaphore )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}

		configASSERT( xSemaphore );
		return xSemaphore;
	}

#endif /* configUSE_COUNTING_SEMAPHORES */
591 /*-----------------------------------------------------------*/\r
592 \r
/*
 * Post an item to a queue, copying it to the position selected by
 * xCopyPosition: the back of the queue, the front of the queue, or over the
 * existing item (queueOVERWRITE - asserted below to be valid only for queues
 * of length one).
 *
 * pvItemToQueue points to the item to be copied in; it may only be NULL when
 * the queue's item size is zero (queue used purely for synchronisation).
 * If the queue is full the calling task blocks for at most xTicksToWait
 * ticks waiting for space to become available.
 *
 * Returns pdPASS if the item was posted, or errQUEUE_FULL if the queue
 * remained full until the block time expired.
 */
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		/* A non-zero block time is invalid while the scheduler is suspended,
		because this function would then attempt to block. */
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif


	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there room on the queue now?  The running task must be
			the highest priority task wanting to access the queue.  If
			the head item in the queue is to be overwritten then it does
			not matter if the queue is full. */
			if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
			{
				traceQUEUE_SEND( pxQueue );
				prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock. A context switch is required. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* If there was a task waiting for data to arrive on the
						queue then unblock it now. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
							{
								/* The unblocked task has a priority higher than
								our own so yield immediately.  Yes it is ok to
								do this from within the critical section - the
								kernel takes care of that. */
								queueYIELD_IF_USING_PREEMPTION();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately.  Yes it is ok to do
							this from within the critical section - the kernel
							takes care of that. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */

				taskEXIT_CRITICAL();

				/* Return to the original privilege level before exiting the
				function. */
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was full and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();

					/* Return to the original privilege level before exiting
					the function. */
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was full and a block time was specified so
					configure the timeout structure.  This only happens on
					the first pass round the loop. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

				/* Unlocking the queue means queue events can affect the
				event list.  It is possible that interrupts occurring now
				remove this task from the event list again - but as the
				scheduler is suspended the task will go onto the pending
				ready list instead of the actual ready list. */
				prvUnlockQueue( pxQueue );

				/* Resuming the scheduler will move tasks from the pending
				ready list into the ready list - so it is feasible that this
				task is already in a ready list before it yields - in which
				case the yield will not cause a context switch unless there
				is also a higher priority task in the pending ready list. */
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
			}
			else
			{
				/* The queue is no longer full - try again from the top of
				the loop. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The timeout has expired. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			/* Return to the original privilege level before exiting the
			function. */
			traceQUEUE_SEND_FAILED( pxQueue );
			return errQUEUE_FULL;
		}
	}
}
778 /*-----------------------------------------------------------*/\r
779 \r
780 #if ( configUSE_ALTERNATIVE_API == 1 )\r
781 \r
	/*
	 * Alternative (simplified) version of xQueueGenericSend().  The logic
	 * mirrors xQueueGenericSend(), but all blocking decisions are made from
	 * inside critical sections rather than by suspending the scheduler and
	 * locking the queue - structurally simpler, at the cost of interrupts
	 * being masked for longer periods.
	 *
	 * Returns pdPASS if the item was queued, errQUEUE_FULL if the queue
	 * remained full until the block time expired.
	 */
	BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				/* Is there room on the queue now?  To be running we must be
				the highest priority task wanting to access the queue. */
				if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
				{
					traceQUEUE_SEND( pxQueue );
					prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately. */
							portYIELD_WITHIN_API();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						/* The queue is full and no block time was specified,
						so fail immediately. */
						taskEXIT_CRITICAL();
						return errQUEUE_FULL;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						/* First pass round the loop with a block time -
						record when blocking started so the timeout check
						below can measure against it. */
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueFull( pxQueue ) != pdFALSE )
					{
						/* Still full and still within the block time - block
						this task on the queue's "waiting to send" list. */
						traceBLOCKING_ON_QUEUE_SEND( pxQueue );
						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* The block time has expired. */
					taskEXIT_CRITICAL();
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
			}
			taskEXIT_CRITICAL();
		}
	}
866 \r
867 #endif /* configUSE_ALTERNATIVE_API */\r
868 /*-----------------------------------------------------------*/\r
869 \r
870 #if ( configUSE_ALTERNATIVE_API == 1 )\r
871 \r
	/*
	 * Alternative (simplified) version of the generic queue receive.  The
	 * logic mirrors the non-alt receive path, but all blocking decisions are
	 * made from inside critical sections rather than by suspending the
	 * scheduler and locking the queue.
	 *
	 * If xJustPeeking is pdTRUE the item is copied out but left on the
	 * queue; otherwise it is removed.  pvBuffer may only be NULL when the
	 * queue's item size is zero.
	 *
	 * Returns pdPASS if an item was received, or errQUEUE_EMPTY if the queue
	 * remained empty until the block time expired.
	 */
	BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	int8_t *pcOriginalReadPosition;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
				{
					/* Remember our read position in case we are just peeking. */
					pcOriginalReadPosition = pxQueue->u.pcReadFrom;

					prvCopyDataFromQueue( pxQueue, pvBuffer );

					if( xJustPeeking == pdFALSE )
					{
						traceQUEUE_RECEIVE( pxQueue );

						/* Data is actually being removed (not just peeked). */
						--( pxQueue->uxMessagesWaiting );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Record the information required to implement
								priority inheritance should it become necessary. */
								pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						/* An item was removed, so there may now be room for a
						task blocked waiting to send. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
							{
								/* The unblocked task has a higher priority
								than this task, so yield immediately. */
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
					}
					else
					{
						traceQUEUE_PEEK( pxQueue );

						/* We are not removing the data, so reset our read
						pointer. */
						pxQueue->u.pcReadFrom = pcOriginalReadPosition;

						/* The data is being left in the queue, so see if there are
						any other tasks waiting for the data.  NOTE(review): unlike
						the generic receive path the scheduler is not suspended
						here - this runs inside a critical section instead. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority than this task. */
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						/* The queue is empty and no block time was specified,
						so fail immediately. */
						taskEXIT_CRITICAL();
						traceQUEUE_RECEIVE_FAILED( pxQueue );
						return errQUEUE_EMPTY;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						/* First pass round the loop with a block time -
						record when blocking started so the timeout check
						below can measure against it. */
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Blocking on a mutex - let the holder inherit
								this task's priority.  Note this is a nested
								critical section (presumably relying on the
								port's critical-section nesting support). */
								taskENTER_CRITICAL();
								{
									vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
								}
								taskEXIT_CRITICAL();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* The block time has expired. */
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
			}
			taskEXIT_CRITICAL();
		}
	}
1020 \r
1021 \r
1022 #endif /* configUSE_ALTERNATIVE_API */\r
1023 /*-----------------------------------------------------------*/\r
1024 \r
1025 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )\r
1026 {\r
1027 BaseType_t xReturn;\r
1028 UBaseType_t uxSavedInterruptStatus;\r
1029 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1030 \r
1031         configASSERT( pxQueue );\r
1032         configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1033         configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );\r
1034 \r
1035         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1036         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1037         above the maximum system call priority are kept permanently enabled, even\r
1038         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1039         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1040         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1041         failure if a FreeRTOS API function is called from an interrupt that has been\r
1042         assigned a priority above the configured maximum system call priority.\r
1043         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1044         that have been assigned a priority at or (logically) below the maximum\r
1045         system call     interrupt priority.  FreeRTOS maintains a separate interrupt\r
1046         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1047         More information (albeit Cortex-M specific) is provided on the following\r
1048         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1049         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1050 \r
1051         /* Similar to xQueueGenericSend, except without blocking if there is no room\r
1052         in the queue.  Also don't directly wake a task that was blocked on a queue\r
1053         read, instead return a flag to say whether a context switch is required or\r
1054         not (i.e. has a task with a higher priority than us been woken by this\r
1055         post). */\r
1056         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1057         {\r
1058                 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )\r
1059                 {\r
1060                         traceQUEUE_SEND_FROM_ISR( pxQueue );\r
1061 \r
1062                         prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
1063 \r
1064                         /* The event list is not altered if the queue is locked.  This will\r
1065                         be done when the queue is unlocked later. */\r
1066                         if( pxQueue->xTxLock == queueUNLOCKED )\r
1067                         {\r
1068                                 #if ( configUSE_QUEUE_SETS == 1 )\r
1069                                 {\r
1070                                         if( pxQueue->pxQueueSetContainer != NULL )\r
1071                                         {\r
1072                                                 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )\r
1073                                                 {\r
1074                                                         /* The queue is a member of a queue set, and posting\r
1075                                                         to the queue set caused a higher priority task to\r
1076                                                         unblock.  A context switch is required. */\r
1077                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1078                                                         {\r
1079                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1080                                                         }\r
1081                                                         else\r
1082                                                         {\r
1083                                                                 mtCOVERAGE_TEST_MARKER();\r
1084                                                         }\r
1085                                                 }\r
1086                                                 else\r
1087                                                 {\r
1088                                                         mtCOVERAGE_TEST_MARKER();\r
1089                                                 }\r
1090                                         }\r
1091                                         else\r
1092                                         {\r
1093                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1094                                                 {\r
1095                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1096                                                         {\r
1097                                                                 /* The task waiting has a higher priority so record that a\r
1098                                                                 context switch is required. */\r
1099                                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1100                                                                 {\r
1101                                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1102                                                                 }\r
1103                                                                 else\r
1104                                                                 {\r
1105                                                                         mtCOVERAGE_TEST_MARKER();\r
1106                                                                 }\r
1107                                                         }\r
1108                                                         else\r
1109                                                         {\r
1110                                                                 mtCOVERAGE_TEST_MARKER();\r
1111                                                         }\r
1112                                                 }\r
1113                                                 else\r
1114                                                 {\r
1115                                                         mtCOVERAGE_TEST_MARKER();\r
1116                                                 }\r
1117                                         }\r
1118                                 }\r
1119                                 #else /* configUSE_QUEUE_SETS */\r
1120                                 {\r
1121                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1122                                         {\r
1123                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1124                                                 {\r
1125                                                         /* The task waiting has a higher priority so record that a\r
1126                                                         context switch is required. */\r
1127                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1128                                                         {\r
1129                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1130                                                         }\r
1131                                                         else\r
1132                                                         {\r
1133                                                                 mtCOVERAGE_TEST_MARKER();\r
1134                                                         }\r
1135                                                 }\r
1136                                                 else\r
1137                                                 {\r
1138                                                         mtCOVERAGE_TEST_MARKER();\r
1139                                                 }\r
1140                                         }\r
1141                                         else\r
1142                                         {\r
1143                                                 mtCOVERAGE_TEST_MARKER();\r
1144                                         }\r
1145                                 }\r
1146                                 #endif /* configUSE_QUEUE_SETS */\r
1147                         }\r
1148                         else\r
1149                         {\r
1150                                 /* Increment the lock count so the task that unlocks the queue\r
1151                                 knows that data was posted while it was locked. */\r
1152                                 ++( pxQueue->xTxLock );\r
1153                         }\r
1154 \r
1155                         xReturn = pdPASS;\r
1156                 }\r
1157                 else\r
1158                 {\r
1159                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1160                         xReturn = errQUEUE_FULL;\r
1161                 }\r
1162         }\r
1163         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1164 \r
1165         return xReturn;\r
1166 }\r
1167 /*-----------------------------------------------------------*/\r
1168 \r
/*
 * Receive an item from a queue, or just peek at the next item, optionally
 * blocking the calling task until data is available or the timeout expires.
 *
 * xQueue       The queue to receive from.
 * pvBuffer     Buffer the received item is copied into.  May only be NULL
 *              when the queue's item size is zero (semaphore/mutex usage).
 * xTicksToWait Maximum number of ticks to block waiting for data.
 * xJustPeeking pdTRUE to copy the item out but leave it on the queue
 *              (the read pointer is restored afterwards); pdFALSE to remove
 *              the item.
 *
 * Returns pdPASS when an item was received/peeked, errQUEUE_EMPTY when the
 * queue remained empty for the full block time.
 */
BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	/* A NULL receive buffer is only legal when the queue stores zero-size
	items. */
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		/* Blocking is not permitted while the scheduler is suspended. */
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */

	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there data in the queue now?  To be running we must be
			the highest priority task wanting to access the queue. */
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Remember the read position in case the queue is only being
				peeked. */
				pcOriginalReadPosition = pxQueue->u.pcReadFrom;

				prvCopyDataFromQueue( pxQueue, pvBuffer );

				if( xJustPeeking == pdFALSE )
				{
					traceQUEUE_RECEIVE( pxQueue );

					/* Actually removing data, not just peeking. */
					--( pxQueue->uxMessagesWaiting );

					#if ( configUSE_MUTEXES == 1 )
					{
						if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
						{
							/* Record the information required to implement
							priority inheritance should it become necessary. */
							pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					#endif

					/* An item was removed, so a task blocked waiting for space
					to send may now be able to proceed. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
						{
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					traceQUEUE_PEEK( pxQueue );

					/* The data is not being removed, so reset the read
					pointer. */
					pxQueue->u.pcReadFrom = pcOriginalReadPosition;

					/* The data is being left in the queue, so see if there are
					any other tasks waiting for the data. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						/* Tasks that are removed from the event list will get added to
						the pending ready list as the scheduler is still suspended. */
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority than this task. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was empty and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was empty and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

				#if ( configUSE_MUTEXES == 1 )
				{
					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
					{
						/* The queue is a mutex held by another task - raise
						that holder's priority to this task's priority if
						necessary before blocking on it. */
						taskENTER_CRITICAL();
						{
							vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
						}
						taskEXIT_CRITICAL();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif

				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
				prvUnlockQueue( pxQueue );
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The block time has expired with the queue still empty. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();
			traceQUEUE_RECEIVE_FAILED( pxQueue );
			return errQUEUE_EMPTY;
		}
	}
}
1356 /*-----------------------------------------------------------*/\r
1357 \r
/*
 * Interrupt-safe version of the queue receive function.  Never blocks; fails
 * immediately if the queue is empty.
 *
 * xQueue                    The queue to receive from.
 * pvBuffer                  Buffer the received item is copied into.  May
 *                           only be NULL when the item size is zero.
 * pxHigherPriorityTaskWoken Optional; set to pdTRUE if receiving unblocked a
 *                           task of higher priority than the interrupted
 *                           task, in which case the caller should request a
 *                           context switch before exiting the ISR.
 *
 * Returns pdPASS when an item was received, pdFAIL when the queue was empty.
 */
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	/* A NULL receive buffer is only legal when the queue stores zero-size
	items. */
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call	interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

			prvCopyDataFromQueue( pxQueue, pvBuffer );
			--( pxQueue->uxMessagesWaiting );

			/* If the queue is locked the event list will not be modified.
			Instead update the lock count so the task that unlocks the queue
			will know that an ISR has removed data while the queue was
			locked. */
			if( pxQueue->xRxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority than us so
						force a context switch. */
						if( pxHigherPriorityTaskWoken != NULL )
						{
							*pxHigherPriorityTaskWoken = pdTRUE;
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was removed while it was locked. */
				++( pxQueue->xRxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
1443 /*-----------------------------------------------------------*/\r
1444 \r
/*
 * Interrupt-safe peek: copy the next item out of the queue WITHOUT removing
 * it.  Never blocks; fails immediately if the queue is empty.
 *
 * xQueue   The queue to peek.
 * pvBuffer Buffer the item is copied into.  May only be NULL when the
 *          queue's item size is zero.
 *
 * Returns pdPASS when an item was copied, pdFAIL when the queue was empty.
 *
 * No waiting task is unblocked because the item remains on the queue and the
 * message count is unchanged.
 */
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,  void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	/* A NULL receive buffer is only legal when the queue stores zero-size
	items. */
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call	interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_PEEK_FROM_ISR( pxQueue );

			/* Remember the read position so it can be reset as nothing is
			actually being removed from the queue. */
			pcOriginalReadPosition = pxQueue->u.pcReadFrom;
			prvCopyDataFromQueue( pxQueue, pvBuffer );
			pxQueue->u.pcReadFrom = pcOriginalReadPosition;

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
1496 /*-----------------------------------------------------------*/\r
1497 \r
1498 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )\r
1499 {\r
1500 UBaseType_t uxReturn;\r
1501 \r
1502         configASSERT( xQueue );\r
1503 \r
1504         taskENTER_CRITICAL();\r
1505         {\r
1506                 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1507         }\r
1508         taskEXIT_CRITICAL();\r
1509 \r
1510         return uxReturn;\r
1511 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1512 /*-----------------------------------------------------------*/\r
1513 \r
1514 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )\r
1515 {\r
1516 UBaseType_t uxReturn;\r
1517 Queue_t *pxQueue;\r
1518 \r
1519         pxQueue = ( Queue_t * ) xQueue;\r
1520         configASSERT( pxQueue );\r
1521 \r
1522         taskENTER_CRITICAL();\r
1523         {\r
1524                 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;\r
1525         }\r
1526         taskEXIT_CRITICAL();\r
1527 \r
1528         return uxReturn;\r
1529 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1530 /*-----------------------------------------------------------*/\r
1531 \r
1532 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )\r
1533 {\r
1534 UBaseType_t uxReturn;\r
1535 \r
1536         configASSERT( xQueue );\r
1537 \r
1538         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1539 \r
1540         return uxReturn;\r
1541 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1542 /*-----------------------------------------------------------*/\r
1543 \r
1544 void vQueueDelete( QueueHandle_t xQueue )\r
1545 {\r
1546 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1547 \r
1548         configASSERT( pxQueue );\r
1549 \r
1550         traceQUEUE_DELETE( pxQueue );\r
1551         #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
1552         {\r
1553                 vQueueUnregisterQueue( pxQueue );\r
1554         }\r
1555         #endif\r
1556         if( pxQueue->pcHead != NULL )\r
1557         {\r
1558                 vPortFree( pxQueue->pcHead );\r
1559         }\r
1560         vPortFree( pxQueue );\r
1561 }\r
1562 /*-----------------------------------------------------------*/\r
1563 \r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Return the trace identifier previously assigned to the queue with
	vQueueSetQueueNumber().  Only built when the trace facility is enabled. */
	UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		return pxQueue->uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
1572 /*-----------------------------------------------------------*/\r
1573 \r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Assign a trace identifier to the queue so kernel-aware tools can label
	it.  Only built when the trace facility is enabled. */
	void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		pxQueue->uxQueueNumber = uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
1582 /*-----------------------------------------------------------*/\r
1583 \r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Return the type code stored when the queue was created (queue,
	semaphore, mutex, etc.).  Only built when the trace facility is
	enabled. */
	uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		return pxQueue->ucQueueType;
	}

#endif /* configUSE_TRACE_FACILITY */
1592 /*-----------------------------------------------------------*/\r
1593 \r
/*
 * Copy an item into the queue's storage area and increment the message
 * count.  Callers are responsible for serialising access (critical section,
 * locked queue, or masked interrupts) - no locking is performed here.
 *
 * xPosition selects where the item goes: queueSEND_TO_BACK writes at
 * pcWriteTo, anything else writes at the front via u.pcReadFrom, and
 * queueOVERWRITE additionally replaces the existing item rather than adding
 * one.  When the item size is zero no data is copied at all; for a mutex
 * the zero-size branch also releases the mutex and clears priority
 * inheritance.
 */
static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				vTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
				pxQueue->pxMutexHolder = NULL;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize;
		/* Wrap the write pointer back to the start of the storage area when
		it passes the end. */
		if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		/* Sending to the front: write at the current read position, then
		move the read pointer backwards (with wrap-around) so the new item
		is the next one read. */
		( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		if( xPosition == queueOVERWRITE )
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--( pxQueue->uxMessagesWaiting );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

	++( pxQueue->uxMessagesWaiting );
}
1662 /*-----------------------------------------------------------*/\r
1663 \r
/*
 * Copy the next item out of the queue into pvBuffer.  Callers are
 * responsible for serialising access - no locking is performed here, and
 * the message count is NOT decremented (the caller does that when actually
 * removing, as opposed to peeking).
 *
 * The read pointer is advanced (with wrap-around) BEFORE the copy because
 * u.pcReadFrom points at the last item read, not at the next item.  Nothing
 * is copied for mutex-type queues, which store no data.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
	if( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX )
	{
		pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
		/* Wrap the read pointer back to the start of the storage area when
		it passes the end. */
		if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
		{
			pxQueue->u.pcReadFrom = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}
}
1684 /*-----------------------------------------------------------*/\r
1685 \r
static void prvUnlockQueue( Queue_t * const pxQueue )
{
	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

	/* The lock counts contains the number of extra data items placed or
	removed from the queue while the queue was locked.  When a queue is
	locked items can be added or removed, but the event lists cannot be
	updated. */
	taskENTER_CRITICAL();
	{
		/* See if data was added to the queue while it was locked. */
		while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
		{
			/* Data was posted while the queue was locked.  Are any tasks
			blocked waiting for data to become available? */
			#if ( configUSE_QUEUE_SETS == 1 )
			{
				if( pxQueue->pxQueueSetContainer != NULL )
				{
					if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
					{
						/* The queue is a member of a queue set, and posting to
						the queue set caused a higher priority task to unblock.
						A context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* Tasks that are removed from the event list will get added to
					the pending ready list as the scheduler is still suspended. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							vTaskMissedYield();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* No tasks are waiting to receive - no point consuming
						the remaining lock count. */
						break;
					}
				}
			}
			#else /* configUSE_QUEUE_SETS */
			{
				/* Tasks that are removed from the event list will get added to
				the pending ready list as the scheduler is still suspended. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority so record that a
						context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* No tasks are waiting to receive - stop processing. */
					break;
				}
			}
			#endif /* configUSE_QUEUE_SETS */

			/* One item posted while locked has now been accounted for. */
			--( pxQueue->xTxLock );
		}

		/* Mark the Tx side unlocked again. */
		pxQueue->xTxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();

	/* Do the same for the Rx lock. */
	taskENTER_CRITICAL();
	{
		while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
		{
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					/* The unblocked task has a higher priority - yield later. */
					vTaskMissedYield();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				/* One item removed while locked has now been accounted for. */
				--( pxQueue->xRxLock );
			}
			else
			{
				break;
			}
		}

		/* Mark the Rx side unlocked again. */
		pxQueue->xRxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();
}
1799 /*-----------------------------------------------------------*/\r
1800 \r
1801 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )\r
1802 {\r
1803 BaseType_t xReturn;\r
1804 \r
1805         taskENTER_CRITICAL();\r
1806         {\r
1807                 if( pxQueue->uxMessagesWaiting == ( UBaseType_t )  0 )\r
1808                 {\r
1809                         xReturn = pdTRUE;\r
1810                 }\r
1811                 else\r
1812                 {\r
1813                         xReturn = pdFALSE;\r
1814                 }\r
1815         }\r
1816         taskEXIT_CRITICAL();\r
1817 \r
1818         return xReturn;\r
1819 }\r
1820 /*-----------------------------------------------------------*/\r
1821 \r
1822 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )\r
1823 {\r
1824 BaseType_t xReturn;\r
1825 \r
1826         configASSERT( xQueue );\r
1827         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
1828         {\r
1829                 xReturn = pdTRUE;\r
1830         }\r
1831         else\r
1832         {\r
1833                 xReturn = pdFALSE;\r
1834         }\r
1835 \r
1836         return xReturn;\r
1837 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
1838 /*-----------------------------------------------------------*/\r
1839 \r
1840 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )\r
1841 {\r
1842 BaseType_t xReturn;\r
1843 \r
1844         taskENTER_CRITICAL();\r
1845         {\r
1846                 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )\r
1847                 {\r
1848                         xReturn = pdTRUE;\r
1849                 }\r
1850                 else\r
1851                 {\r
1852                         xReturn = pdFALSE;\r
1853                 }\r
1854         }\r
1855         taskEXIT_CRITICAL();\r
1856 \r
1857         return xReturn;\r
1858 }\r
1859 /*-----------------------------------------------------------*/\r
1860 \r
1861 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )\r
1862 {\r
1863 BaseType_t xReturn;\r
1864 \r
1865         configASSERT( xQueue );\r
1866         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )\r
1867         {\r
1868                 xReturn = pdTRUE;\r
1869         }\r
1870         else\r
1871         {\r
1872                 xReturn = pdFALSE;\r
1873         }\r
1874 \r
1875         return xReturn;\r
1876 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
1877 /*-----------------------------------------------------------*/\r
1878 \r
#if ( configUSE_CO_ROUTINES == 1 )

	/* Co-routine version of a queue send.  A co-routine cannot block inside
	a called function, so on a full queue this places the co-routine on the
	queue's "waiting to send" event list and returns errQUEUE_BLOCKED; the
	co-routine scheduler then performs the actual block. */
	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a coroutine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		/* Interrupts were briefly re-enabled above, so the state of the queue
		may have changed - the space test is repeated below before copying. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* An interrupt filled the queue in the window between the two
				critical sections above. */
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
1954 /*-----------------------------------------------------------*/\r
1955 \r
#if ( configUSE_CO_ROUTINES == 1 )

	/* Co-routine version of a queue receive.  A co-routine cannot block
	inside a called function, so on an empty queue this places the co-routine
	on the queue's "waiting to receive" event list and returns
	errQUEUE_BLOCKED; the co-routine scheduler then performs the block. */
	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					/* NOTE(review): errQUEUE_FULL is returned here even though
					the queue is empty - confirm callers rely on this exact
					value before changing it. */
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		/* Interrupts were briefly re-enabled above, so re-test for data
		before reading from the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue.  The read pointer is
				advanced before the copy - it points at the item most recently
				read. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					/* Wrap back to the start of the storage area. */
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						/* The unblocked co-routine has a higher priority so
						record that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* An interrupt emptied the queue in the window between the two
				critical sections above. */
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
2044 /*-----------------------------------------------------------*/\r
2045 \r
#if ( configUSE_CO_ROUTINES == 1 )

	/* ISR-safe co-routine send.  Returns pdTRUE if this call unblocked a
	co-routine, otherwise returns xCoRoutinePreviouslyWoken unchanged so the
	caller can chain the result across multiple sends in one ISR. */
	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* A co-routine was woken by this send. */
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
2092 /*-----------------------------------------------------------*/\r
2093 \r
#if ( configUSE_CO_ROUTINES == 1 )

	/* ISR-safe co-routine receive.  Returns pdPASS if an item was copied to
	pvBuffer, pdFAIL if the queue was empty.  *pxCoRoutineWoken is set to
	pdTRUE if this call unblocked a co-routine (only one wake per ISR). */
	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available. If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue.  The read pointer is advanced
			before the copy - it points at the item most recently read. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				/* Wrap back to the start of the storage area. */
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			/* Only wake a co-routine if one has not already been woken in
			this ISR. */
			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
2152 /*-----------------------------------------------------------*/\r
2153 \r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* Record a human readable name for xQueue in the registry so kernel
	aware debuggers can display it.  If the registry is full the call has no
	effect. */
	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t uxSlot;

		/* Scan for the first unused slot - a slot is free when its
		pcQueueName member is NULL. */
		for( uxSlot = ( UBaseType_t ) 0U; uxSlot < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; uxSlot++ )
		{
			if( xQueueRegistry[ uxSlot ].pcQueueName != NULL )
			{
				/* Slot already in use - keep looking. */
				mtCOVERAGE_TEST_MARKER();
			}
			else
			{
				/* Store the information on this queue, then stop searching. */
				xQueueRegistry[ uxSlot ].pcQueueName = pcQueueName;
				xQueueRegistry[ uxSlot ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
2181 /*-----------------------------------------------------------*/\r
2182 \r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* Remove xQueue from the registry, if it is present.  Both the stored
	name and the stored handle are cleared. */
	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered in actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot if free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;

				/* Also clear the stored handle.  Leaving a stale handle in a
				"free" slot could cause this function, or a kernel aware
				debugger scanning the registry, to match a queue that was
				deleted and whose memory was subsequently reused. */
				xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
2208 /*-----------------------------------------------------------*/\r
2209 \r
#if ( configUSE_TIMERS == 1 )

	/* Kernel-internal helper: place the calling task on the queue's
	"waiting to receive" event list for up to xTicksToWait ticks if the queue
	is empty.  Does not itself cause the block - see comments below. */
	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
2244 /*-----------------------------------------------------------*/\r
2245 \r
#if ( configUSE_QUEUE_SETS == 1 )

	/* A queue set is implemented as an ordinary queue whose items are
	Queue_t pointers - each stored item is the handle of a member that
	contains data.  Returns NULL if the underlying queue cannot be created. */
	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
		return xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
	}

#endif /* configUSE_QUEUE_SETS */
2258 /*-----------------------------------------------------------*/\r
2259 \r
#if ( configUSE_QUEUE_SETS == 1 )

	/* Add a queue or semaphore to a queue set.  Fails if the member already
	belongs to a set, or if it already holds items. */
	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xResult;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		taskENTER_CRITICAL();
		{
			if( pxQueueOrSemaphore->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xResult = pdFAIL;
			}
			else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xResult = pdFAIL;
			}
			else
			{
				/* Record the member's containing set. */
				pxQueueOrSemaphore->pxQueueSetContainer = xQueueSet;
				xResult = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xResult;
	}

#endif /* configUSE_QUEUE_SETS */
2291 /*-----------------------------------------------------------*/\r
2292 \r
#if ( configUSE_QUEUE_SETS == 1 )

	/* Remove a queue or semaphore from a queue set.  Fails if the member is
	not in xQueueSet, or if it still holds items (the set could otherwise
	retain pending events for it). */
	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xResult = pdFAIL;
	Queue_t * const pxMember = ( Queue_t * ) xQueueOrSemaphore;

		if( pxMember->pxQueueSetContainer == xQueueSet )
		{
			if( pxMember->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				taskENTER_CRITICAL();
				{
					/* The queue is no longer contained in the set. */
					pxMember->pxQueueSetContainer = NULL;
				}
				taskEXIT_CRITICAL();
				xResult = pdPASS;
			}
			else
			{
				/* It is dangerous to remove a queue from a set when the queue
				is not empty because the queue set will still hold pending
				events for the queue - leave xResult as pdFAIL. */
			}
		}
		else
		{
			/* The queue was not a member of the set - leave xResult as
			pdFAIL. */
		}

		return xResult;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
2327 /*-----------------------------------------------------------*/\r
2328 \r
2329 #if ( configUSE_QUEUE_SETS == 1 )\r
2330 \r
2331         QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )\r
2332         {\r
2333         QueueSetMemberHandle_t xReturn = NULL;\r
2334 \r
2335                 ( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */\r
2336                 return xReturn;\r
2337         }\r
2338 \r
2339 #endif /* configUSE_QUEUE_SETS */\r
2340 /*-----------------------------------------------------------*/\r
2341 \r
2342 #if ( configUSE_QUEUE_SETS == 1 )\r
2343 \r
2344         QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )\r
2345         {\r
2346         QueueSetMemberHandle_t xReturn = NULL;\r
2347 \r
2348                 ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */\r
2349                 return xReturn;\r
2350         }\r
2351 \r
2352 #endif /* configUSE_QUEUE_SETS */\r
2353 /*-----------------------------------------------------------*/\r
2354 \r
2355 #if ( configUSE_QUEUE_SETS == 1 )\r
2356 \r
	/* Post pxQueue's handle into the queue set that contains pxQueue, so a
	task blocked on the set learns that this member has data.  Returns pdTRUE
	if a higher priority task was unblocked (i.e. a context switch should be
	requested by the caller), otherwise pdFALSE. */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called from a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		/* Guard against overflowing the set even if the assert above is
		compiled out. */
		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );
			/* The data copied is the handle of the queue that contains data. */
			prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
			if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
			{
				/* A task is blocked on the set itself - wake it. */
				if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
				{
					/* The task waiting has a higher priority */
					xReturn = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}
2396 \r
2397 #endif /* configUSE_QUEUE_SETS */\r
2398 \r
2399 \r
2400 \r
2401 \r
2402 \r
2403 \r
2404 \r
2405 \r
2406 \r
2407 \r
2408 \r
2409 \r