/*
 * Mirror provenance (non-source text from the gitweb scrape, preserved as a
 * comment so the file remains valid C):
 * git.sur5r.net Git - freertos/blob - FreeRTOS/Source/queue.c
 * Commit: "Replace standard types with stdint.h types."
 */
1 /*\r
2     FreeRTOS V7.6.0 - Copyright (C) 2013 Real Time Engineers Ltd.\r
3     All rights reserved\r
4 \r
5     VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.\r
6 \r
7     ***************************************************************************\r
8      *                                                                       *\r
9      *    FreeRTOS provides completely free yet professionally developed,    *\r
10      *    robust, strictly quality controlled, supported, and cross          *\r
11      *    platform software that has become a de facto standard.             *\r
12      *                                                                       *\r
13      *    Help yourself get started quickly and support the FreeRTOS         *\r
14      *    project by purchasing a FreeRTOS tutorial book, reference          *\r
15      *    manual, or both from: http://www.FreeRTOS.org/Documentation        *\r
16      *                                                                       *\r
17      *    Thank you!                                                         *\r
18      *                                                                       *\r
19     ***************************************************************************\r
20 \r
21     This file is part of the FreeRTOS distribution.\r
22 \r
23     FreeRTOS is free software; you can redistribute it and/or modify it under\r
24     the terms of the GNU General Public License (version 2) as published by the\r
25     Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.\r
26 \r
27     >>! NOTE: The modification to the GPL is included to allow you to distribute\r
28     >>! a combined work that includes FreeRTOS without being obliged to provide\r
29     >>! the source code for proprietary components outside of the FreeRTOS\r
30     >>! kernel.\r
31 \r
32     FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY\r
33     WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\r
34     FOR A PARTICULAR PURPOSE.  Full license text is available from the following\r
35     link: http://www.freertos.org/a00114.html\r
36 \r
37     1 tab == 4 spaces!\r
38 \r
39     ***************************************************************************\r
40      *                                                                       *\r
41      *    Having a problem?  Start by reading the FAQ "My application does   *\r
42      *    not run, what could be wrong?"                                     *\r
43      *                                                                       *\r
44      *    http://www.FreeRTOS.org/FAQHelp.html                               *\r
45      *                                                                       *\r
46     ***************************************************************************\r
47 \r
48     http://www.FreeRTOS.org - Documentation, books, training, latest versions,\r
49     license and Real Time Engineers Ltd. contact details.\r
50 \r
51     http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,\r
52     including FreeRTOS+Trace - an indispensable productivity tool, a DOS\r
53     compatible FAT file system, and our tiny thread aware UDP/IP stack.\r
54 \r
55     http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High\r
56     Integrity Systems to sell under the OpenRTOS brand.  Low cost OpenRTOS\r
57     licenses offer ticketed support, indemnification and middleware.\r
58 \r
59     http://www.SafeRTOS.com - High Integrity Systems also provide a safety\r
60     engineered and independently SIL3 certified version for use in safety and\r
61     mission critical applications that require provable dependability.\r
62 \r
63     1 tab == 4 spaces!\r
64 */\r
65 \r
66 #include <stdlib.h>\r
67 #include <string.h>\r
68 \r
69 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining\r
70 all the API functions to use the MPU wrappers.  That should only be done when\r
71 task.h is included from an application file. */\r
72 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE\r
73 \r
74 #include "FreeRTOS.h"\r
75 #include "task.h"\r
76 #include "queue.h"\r
77 \r
78 #if ( configUSE_CO_ROUTINES == 1 )\r
79         #include "croutine.h"\r
80 #endif\r
81 \r
82 /* Lint e961 and e750 are suppressed as a MISRA exception justified because the\r
83 MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the\r
84 header files above, but not in this file, in order to generate the correct\r
85 privileged Vs unprivileged linkage and placement. */\r
86 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */\r
87 \r
88 \r
/* Constants used with the xRxLock and xTxLock structure members.  A lock
member holds queueUNLOCKED when the queue is not locked; while the queue is
locked it instead counts, starting from queueLOCKED_UNMODIFIED (zero), the
number of items an ISR added to or removed from the queue. */
#define queueUNLOCKED					( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	/* Under the preemptive scheduler, request a context switch whenever a
	queue operation has woken a higher priority task. */
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
120 \r
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference: each send copies uxItemSize bytes
 * into the storage area between pcHead and pcTail.
 *
 * The same structure also backs mutexes and semaphores - see the
 * pxMutexHolder / uxQueueType macro overlay defined above.
 */
typedef struct QueueDefinition
{
	int8_t *pcHead;					/*< Points to the beginning of the queue storage area. */
	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
	int8_t *pcWriteTo;				/*< Points to the free next place in the storage area. */

	union							/* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
	{
		int8_t *pcReadFrom;			/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
		UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
	} u;

	List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
	List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

	volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
	UBaseType_t uxItemSize;			/*< The size of each items that the queue will hold. */

	volatile BaseType_t xRxLock;	/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
	volatile BaseType_t xTxLock;	/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

	#if ( configUSE_TRACE_FACILITY == 1 )
		UBaseType_t uxQueueNumber;	/*< Identifier assigned for kernel-aware trace tools. */
		uint8_t ucQueueType;		/*< Records what the queue was created as (queue, mutex, semaphore, ...) for trace tools. */
	#endif

	#if ( configUSE_QUEUE_SETS == 1 )
		struct QueueDefinition *pxQueueSetContainer;	/*< The queue set this queue belongs to, or NULL if none. */
	#endif

} Queue_t;
157 /*-----------------------------------------------------------*/\r
158 \r
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
180 \r
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue in unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE;
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.  xPosition selects the position (queueSEND_TO_FRONT,
 * queueSEND_TO_BACK or queueOVERWRITE).
 */
static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue into pvBuffer.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
	/*
	 * Checks to see if a queue is a member of a queue set, and if so, notifies
	 * the queue set that the queue contains data.
	 */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif
223 \r
224 /*-----------------------------------------------------------*/\r
225 \r
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.  Both lock members are moved from
 * queueUNLOCKED to queueLOCKED_UNMODIFIED inside one critical section; ISRs
 * that run afterwards will increment these counts instead of touching the
 * event lists, and prvUnlockQueue() later acts on the accumulated counts.
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL();									\
	{														\
		if( ( pxQueue )->xRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->xTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL()
243 /*-----------------------------------------------------------*/\r
244 \r
/*
 * Reset a queue to its empty state.
 *
 * xQueue    The queue being reset.
 * xNewQueue pdTRUE when called during queue creation (the event lists have
 *           never been initialised, so they are initialised here), pdFALSE
 *           when resetting a queue already in use (the event lists are
 *           preserved and at most one blocked sender is woken).
 *
 * Always returns pdPASS; a value is returned only for calling-semantic
 * consistency with earlier versions.
 */
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		/* Rewind the storage pointers.  pcReadFrom is set to the LAST item
		slot rather than the first - presumably because the read pointer is
		advanced before each read, so the first read wraps to pcHead; confirm
		against prvCopyDataFromQueue(). */
		pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
		pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
		pxQueue->pcWriteTo = pxQueue->pcHead;
		pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
		pxQueue->xRxLock = queueUNLOCKED;
		pxQueue->xTxLock = queueUNLOCKED;

		if( xNewQueue == pdFALSE )
		{
			/* If there are tasks blocked waiting to read from the queue, then
			the tasks will remain blocked as after this function exits the queue
			will still be empty.  If there are tasks blocked waiting to write to
			the queue, then one should be unblocked as after this function exits
			it will be possible to write to it. */
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
				{
					/* The unblocked sender has a higher priority - switch to
					it (no-op under the cooperative scheduler). */
					queueYIELD_IF_USING_PREEMPTION();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			/* Ensure the event queues start in the correct state. */
			vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
		}
	}
	taskEXIT_CRITICAL();

	/* A value is returned for calling semantic consistency with previous
	versions. */
	return pdPASS;
}
296 /*-----------------------------------------------------------*/\r
297 \r
298 QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )\r
299 {\r
300 Queue_t *pxNewQueue;\r
301 size_t xQueueSizeInBytes;\r
302 QueueHandle_t xReturn = NULL;\r
303 \r
304         /* Remove compiler warnings about unused parameters should\r
305         configUSE_TRACE_FACILITY not be set to 1. */\r
306         ( void ) ucQueueType;\r
307 \r
308         /* Allocate the new queue structure. */\r
309         if( uxQueueLength > ( UBaseType_t ) 0 )\r
310         {\r
311                 pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );\r
312                 if( pxNewQueue != NULL )\r
313                 {\r
314                         /* Create the list of pointers to queue items.  The queue is one byte\r
315                         longer than asked for to make wrap checking easier/faster. */\r
316                         xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
317 \r
318                         pxNewQueue->pcHead = ( int8_t * ) pvPortMalloc( xQueueSizeInBytes );\r
319                         if( pxNewQueue->pcHead != NULL )\r
320                         {\r
321                                 /* Initialise the queue members as described above where the\r
322                                 queue type is defined. */\r
323                                 pxNewQueue->uxLength = uxQueueLength;\r
324                                 pxNewQueue->uxItemSize = uxItemSize;\r
325                                 ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );\r
326 \r
327                                 #if ( configUSE_TRACE_FACILITY == 1 )\r
328                                 {\r
329                                         pxNewQueue->ucQueueType = ucQueueType;\r
330                                 }\r
331                                 #endif /* configUSE_TRACE_FACILITY */\r
332 \r
333                                 #if( configUSE_QUEUE_SETS == 1 )\r
334                                 {\r
335                                         pxNewQueue->pxQueueSetContainer = NULL;\r
336                                 }\r
337                                 #endif /* configUSE_QUEUE_SETS */\r
338 \r
339                                 traceQUEUE_CREATE( pxNewQueue );\r
340                                 xReturn = pxNewQueue;\r
341                         }\r
342                         else\r
343                         {\r
344                                 traceQUEUE_CREATE_FAILED( ucQueueType );\r
345                                 vPortFree( pxNewQueue );\r
346                         }\r
347                 }\r
348                 else\r
349                 {\r
350                         mtCOVERAGE_TEST_MARKER();\r
351                 }\r
352         }\r
353         else\r
354         {\r
355                 mtCOVERAGE_TEST_MARKER();\r
356         }\r
357 \r
358         configASSERT( xReturn );\r
359 \r
360         return xReturn;\r
361 }\r
362 /*-----------------------------------------------------------*/\r
363 \r
#if ( configUSE_MUTEXES == 1 )

	/*
	 * Create a mutex using the generic queue structure.  A mutex is a queue
	 * of length 1 with an item size of 0; the pcHead/pcTail members are
	 * repurposed (via the uxQueueType/pxMutexHolder macros) to mark the
	 * structure as a mutex and to record the holding task.
	 *
	 * Returns the new mutex handle, or NULL if the allocation failed.
	 */
	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		/* Allocate the new queue structure. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
		if( pxNewQueue != NULL )
		{
			/* Information required for priority inheritance.  uxQueueType
			(an alias of pcHead) set to queueQUEUE_IS_MUTEX (NULL) is what
			marks this structure as a mutex rather than a queue. */
			pxNewQueue->pxMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* Queues used as a mutex no data is actually copied into or out
			of the queue. */
			pxNewQueue->pcWriteTo = NULL;
			pxNewQueue->u.pcReadFrom = NULL;

			/* Each mutex has a length of 1 (like a binary semaphore) and
			an item size of 0 as nothing is actually copied into or out
			of the mutex. */
			pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
			pxNewQueue->uxLength = ( UBaseType_t ) 1U;
			pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
			pxNewQueue->xRxLock = queueUNLOCKED;
			pxNewQueue->xTxLock = queueUNLOCKED;

			#if ( configUSE_TRACE_FACILITY == 1 )
			{
				pxNewQueue->ucQueueType = ucQueueType;
			}
			#endif

			#if ( configUSE_QUEUE_SETS == 1 )
			{
				pxNewQueue->pxQueueSetContainer = NULL;
			}
			#endif

			/* Ensure the event queues start with the correct state. */
			vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state - a newly
			created mutex is available, which for this implementation means
			it must be 'given' once. */
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			traceCREATE_MUTEX_FAILED();
		}

		configASSERT( pxNewQueue );
		return pxNewQueue;
	}

#endif /* configUSE_MUTEXES */
427 /*-----------------------------------------------------------*/\r
428 \r
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	/*
	 * Return the handle of the task that currently holds the mutex, or NULL
	 * if the mutex is not held or xSemaphore is not a mutex.
	 *
	 * This function is called by xSemaphoreGetMutexHolder(), and should not
	 * be called directly.  Note:  This is a good way of determining if the
	 * calling task is the mutex holder, but not a good way of determining
	 * the identity of the mutex holder, as the holder may change between the
	 * critical section below exiting and the function returning.
	 */
	void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
	{
	void *pxReturn;
	Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore;

		taskENTER_CRITICAL();
		{
			/* Default to "no holder"; only a genuine mutex structure can
			report a holder. */
			pxReturn = NULL;

			if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				pxReturn = ( void * ) pxSemaphore->pxMutexHolder;
			}
		}
		taskEXIT_CRITICAL();

		return pxReturn;
	}

#endif
457 /*-----------------------------------------------------------*/\r
458 \r
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	/*
	 * 'Give' a recursive mutex.  The mutex is only actually returned (via
	 * xQueueGenericSend()) when the recursion count unwinds to zero.
	 *
	 * Returns pdPASS if the calling task holds the mutex, pdFAIL otherwise.
	 */
	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then pxMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		pxMutexHolder can never coincidentally equal the tasks handle, and as
		this is the only condition we are interested in it does not matter if
		pxMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the pxMutexHolder variable. */
		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.uxRecursiveCallCount )--;

			/* Have we unwound the call count? */
			if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			/* We cannot give the mutex because we are not the holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
511 /*-----------------------------------------------------------*/\r
512 \r
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	/*
	 * 'Take' a recursive mutex.  If the calling task already holds the mutex
	 * only the recursion count is incremented; otherwise the task attempts
	 * to obtain the mutex via xQueueGenericReceive(), blocking for up to
	 * xBlockTime ticks.
	 *
	 * Returns pdPASS if the mutex was obtained (or was already held by the
	 * caller), otherwise the failure value from xQueueGenericReceive().
	 */
	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xBlockTime )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* Comments regarding mutual exclusion as per those within
		xQueueGiveMutexRecursive(). */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
		{
			/* Already the holder - just record the nested take. */
			( pxMutex->u.uxRecursiveCallCount )++;
			xReturn = pdPASS;
		}
		else
		{
			xReturn = xQueueGenericReceive( pxMutex, NULL, xBlockTime, pdFALSE );

			/* pdPASS will only be returned if we successfully obtained the mutex,
			we may have blocked to reach here. */
			if( xReturn == pdPASS )
			{
				( pxMutex->u.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
552 /*-----------------------------------------------------------*/\r
553 \r
#if ( configUSE_COUNTING_SEMAPHORES == 1 )

	/*
	 * Create a counting semaphore with a maximum count of uxMaxCount and an
	 * initial count of uxInitialCount.  A counting semaphore is a queue of
	 * uxMaxCount zero-sized items; the current count is held in the queue's
	 * uxMessagesWaiting member.
	 *
	 * Returns the new semaphore handle, or NULL on allocation failure.
	 */
	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle == NULL )
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}
		else
		{
			/* Preload the semaphore's count by writing the queue's message
			count directly. */
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}

		configASSERT( xHandle );
		return xHandle;
	}

#endif /* configUSE_COUNTING_SEMAPHORES */
581 /*-----------------------------------------------------------*/\r
582 \r
/*
 * Post an item to a queue.  Task context only - use
 * xQueueGenericSendFromISR() from an interrupt.
 *
 * pvItemToQueue - Pointer to the item that is copied (by value) into the
 *                 queue.  May be NULL only when the queue was created with an
 *                 item size of zero (e.g. semaphores).
 * xTicksToWait  - Maximum number of ticks to block waiting for space to
 *                 become available on a full queue.
 * xCopyPosition - queueSEND_TO_BACK, queueSEND_TO_FRONT or queueOVERWRITE.
 *                 queueOVERWRITE is only valid on queues of length 1.
 *
 * Returns pdPASS if the item was posted, or errQUEUE_FULL if the queue
 * remained full for the entire block time.
 */
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		/* A non-zero block time cannot be honoured while the scheduler is
		suspended - the task could never be unblocked. */
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif


	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there room on the queue now?  The running task must be
			the highest priority task wanting to access the queue.  If
			the head item in the queue is to be overwritten then it does
			not matter if the queue is full. */
			if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
			{
				traceQUEUE_SEND( pxQueue );
				prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock. A context switch is required. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* If there was a task waiting for data to arrive on the
						queue then unblock it now. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
							{
								/* The unblocked task has a priority higher than
								our own so yield immediately.  Yes it is ok to
								do this from within the critical section - the
								kernel takes care of that. */
								queueYIELD_IF_USING_PREEMPTION();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately.  Yes it is ok to do
							this from within the critical section - the kernel
							takes care of that. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */

				taskEXIT_CRITICAL();

				/* Return to the original privilege level before exiting the
				function. */
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was full and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();

					/* Return to the original privilege level before exiting
					the function. */
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was full and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

				/* Unlocking the queue means queue events can effect the
				event list.  It is possible that interrupts occurring now
				remove this task from the event list again - but as the
				scheduler is suspended the task will go onto the pending
				ready list instead of the actual ready list. */
				prvUnlockQueue( pxQueue );

				/* Resuming the scheduler will move tasks from the pending
				ready list into the ready list - so it is feasible that this
				task is already in a ready list before it yields - in which
				case the yield will not cause a context switch unless there
				is also a higher priority task in the pending ready list. */
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The timeout has expired. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			/* Return to the original privilege level before exiting the
			function. */
			traceQUEUE_SEND_FAILED( pxQueue );
			return errQUEUE_FULL;
		}
	}
}
768 /*-----------------------------------------------------------*/\r
769 \r
#if ( configUSE_ALTERNATIVE_API == 1 )

	/*
	 * 'Alternative' (simplified) version of xQueueGenericSend().  Unlike the
	 * standard version it blocks and re-checks entirely from within critical
	 * sections rather than using the queue lock / scheduler suspension
	 * mechanism.  Task context only.
	 *
	 * Returns pdPASS if the item was posted, errQUEUE_FULL on timeout.
	 */
	BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				/* Is there room on the queue now?  To be running we must be
				the highest priority task wanting to access the queue. */
				if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
				{
					traceQUEUE_SEND( pxQueue );
					prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately. */
							portYIELD_WITHIN_API();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						/* The queue is full and no block time was specified
						(or it has expired) so leave now. */
						taskEXIT_CRITICAL();
						return errQUEUE_FULL;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						/* First time round the loop with the queue full -
						capture the entry time so the timeout can be
						measured. */
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueFull( pxQueue ) != pdFALSE )
					{
						/* Still full - block this task on the queue's 'send'
						event list for the remaining ticks. */
						traceBLOCKING_ON_QUEUE_SEND( pxQueue );
						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* The block time has expired. */
					taskEXIT_CRITICAL();
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
			}
			taskEXIT_CRITICAL();
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
858 /*-----------------------------------------------------------*/\r
859 \r
#if ( configUSE_ALTERNATIVE_API == 1 )

	/*
	 * 'Alternative' (simplified) version of xQueueGenericReceive().  Blocks
	 * and re-checks entirely from within critical sections rather than using
	 * the queue lock / scheduler suspension mechanism.  Task context only.
	 *
	 * pvBuffer     - Buffer the received item is copied into.  May be NULL
	 *                only when the queue's item size is zero.
	 * xJustPeeking - If pdTRUE the item is copied out but left on the queue
	 *                (the read position is restored afterwards).
	 *
	 * Returns pdPASS if an item was received, errQUEUE_EMPTY on timeout.
	 */
	BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	int8_t *pcOriginalReadPosition;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
				{
					/* Remember our read position in case we are just peeking. */
					pcOriginalReadPosition = pxQueue->u.pcReadFrom;

					prvCopyDataFromQueue( pxQueue, pvBuffer );

					if( xJustPeeking == pdFALSE )
					{
						traceQUEUE_RECEIVE( pxQueue );

						/* Data is actually being removed (not just peeked). */
						--( pxQueue->uxMessagesWaiting );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Record the information required to implement
								priority inheritance should it become necessary. */
								pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						/* Taking an item frees a slot, so unblock any task
						waiting to send to this queue. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
							{
								/* The unblocked sender has a higher priority
								than this task, so yield. */
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
					}
					else
					{
						traceQUEUE_PEEK( pxQueue );

						/* We are not removing the data, so reset our read
						pointer. */
						pxQueue->u.pcReadFrom = pcOriginalReadPosition;

						/* The data is being left in the queue, so see if there are
						any other tasks waiting for the data. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							/* Tasks that are removed from the event list will get added to
							the pending ready list as the scheduler is still suspended. */
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority than this task. */
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						/* The queue is empty and no block time was specified
						(or it has expired) so leave now. */
						taskEXIT_CRITICAL();
						traceQUEUE_RECEIVE_FAILED( pxQueue );
						return errQUEUE_EMPTY;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						/* First time round the loop with the queue empty -
						capture the entry time so the timeout can be
						measured. */
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* About to block on a mutex - raise the
								holder's priority to ours if necessary
								(priority inheritance).  Critical sections
								nest, so this inner pair is safe. */
								taskENTER_CRITICAL();
								{
									vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
								}
								taskEXIT_CRITICAL();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* The block time has expired. */
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
			}
			taskEXIT_CRITICAL();
		}
	}


#endif /* configUSE_ALTERNATIVE_API */
1013 /*-----------------------------------------------------------*/\r
1014 \r
1015 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )\r
1016 {\r
1017 BaseType_t xReturn;\r
1018 UBaseType_t uxSavedInterruptStatus;\r
1019 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1020 \r
1021         configASSERT( pxQueue );\r
1022         configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1023         configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );\r
1024 \r
1025         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1026         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1027         above the maximum system call priority are kept permanently enabled, even\r
1028         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1029         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1030         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1031         failure if a FreeRTOS API function is called from an interrupt that has been\r
1032         assigned a priority above the configured maximum system call priority.\r
1033         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1034         that have been assigned a priority at or (logically) below the maximum\r
1035         system call     interrupt priority.  FreeRTOS maintains a separate interrupt\r
1036         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1037         More information (albeit Cortex-M specific) is provided on the following\r
1038         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1039         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1040 \r
1041         /* Similar to xQueueGenericSend, except we don't block if there is no room\r
1042         in the queue.  Also we don't directly wake a task that was blocked on a\r
1043         queue read, instead we return a flag to say whether a context switch is\r
1044         required or not (i.e. has a task with a higher priority than us been woken\r
1045         by this post). */\r
1046         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1047         {\r
1048                 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )\r
1049                 {\r
1050                         traceQUEUE_SEND_FROM_ISR( pxQueue );\r
1051 \r
1052                         prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
1053 \r
1054                         /* If the queue is locked we do not alter the event list.  This will\r
1055                         be done when the queue is unlocked later. */\r
1056                         if( pxQueue->xTxLock == queueUNLOCKED )\r
1057                         {\r
1058                                 #if ( configUSE_QUEUE_SETS == 1 )\r
1059                                 {\r
1060                                         if( pxQueue->pxQueueSetContainer != NULL )\r
1061                                         {\r
1062                                                 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )\r
1063                                                 {\r
1064                                                         /* The queue is a member of a queue set, and posting\r
1065                                                         to the queue set caused a higher priority task to\r
1066                                                         unblock.  A context switch is required. */\r
1067                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1068                                                         {\r
1069                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1070                                                         }\r
1071                                                         else\r
1072                                                         {\r
1073                                                                 mtCOVERAGE_TEST_MARKER();\r
1074                                                         }\r
1075                                                 }\r
1076                                                 else\r
1077                                                 {\r
1078                                                         mtCOVERAGE_TEST_MARKER();\r
1079                                                 }\r
1080                                         }\r
1081                                         else\r
1082                                         {\r
1083                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1084                                                 {\r
1085                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1086                                                         {\r
1087                                                                 /* The task waiting has a higher priority so record that a\r
1088                                                                 context switch is required. */\r
1089                                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1090                                                                 {\r
1091                                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1092                                                                 }\r
1093                                                                 else\r
1094                                                                 {\r
1095                                                                         mtCOVERAGE_TEST_MARKER();\r
1096                                                                 }\r
1097                                                         }\r
1098                                                         else\r
1099                                                         {\r
1100                                                                 mtCOVERAGE_TEST_MARKER();\r
1101                                                         }\r
1102                                                 }\r
1103                                                 else\r
1104                                                 {\r
1105                                                         mtCOVERAGE_TEST_MARKER();\r
1106                                                 }\r
1107                                         }\r
1108                                 }\r
1109                                 #else /* configUSE_QUEUE_SETS */\r
1110                                 {\r
1111                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1112                                         {\r
1113                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1114                                                 {\r
1115                                                         /* The task waiting has a higher priority so record that a\r
1116                                                         context switch is required. */\r
1117                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1118                                                         {\r
1119                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1120                                                         }\r
1121                                                         else\r
1122                                                         {\r
1123                                                                 mtCOVERAGE_TEST_MARKER();\r
1124                                                         }\r
1125                                                 }\r
1126                                                 else\r
1127                                                 {\r
1128                                                         mtCOVERAGE_TEST_MARKER();\r
1129                                                 }\r
1130                                         }\r
1131                                         else\r
1132                                         {\r
1133                                                 mtCOVERAGE_TEST_MARKER();\r
1134                                         }\r
1135                                 }\r
1136                                 #endif /* configUSE_QUEUE_SETS */\r
1137                         }\r
1138                         else\r
1139                         {\r
1140                                 /* Increment the lock count so the task that unlocks the queue\r
1141                                 knows that data was posted while it was locked. */\r
1142                                 ++( pxQueue->xTxLock );\r
1143                         }\r
1144 \r
1145                         xReturn = pdPASS;\r
1146                 }\r
1147                 else\r
1148                 {\r
1149                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1150                         xReturn = errQUEUE_FULL;\r
1151                 }\r
1152         }\r
1153         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1154 \r
1155         return xReturn;\r
1156 }\r
1157 /*-----------------------------------------------------------*/\r
1158 \r
/*
 * Common implementation behind xQueueReceive(), xQueuePeek() and the
 * semaphore/mutex "take" macros.
 *
 * pvBuffer may only be NULL when the queue's item size is zero (as is the
 * case for semaphores and mutexes) - enforced by the second configASSERT()
 * below.  xJustPeeking == pdTRUE copies the item out but leaves it on the
 * queue.
 *
 * Returns pdPASS when an item was received (or peeked), or errQUEUE_EMPTY
 * when the queue remained empty for the full xTicksToWait block time.
 */
BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		/* A task is not allowed to block while the scheduler is suspended. */
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */

	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there data in the queue now?  To be running we must be
			the highest priority task wanting to access the queue. */
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Remember the read position in case the queue is only being
				peeked. */
				pcOriginalReadPosition = pxQueue->u.pcReadFrom;

				prvCopyDataFromQueue( pxQueue, pvBuffer );

				if( xJustPeeking == pdFALSE )
				{
					traceQUEUE_RECEIVE( pxQueue );

					/* Actually removing data, not just peeking. */
					--( pxQueue->uxMessagesWaiting );

					#if ( configUSE_MUTEXES == 1 )
					{
						if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
						{
							/* Record the information required to implement
							priority inheritance should it become necessary. */
							pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					#endif

					/* Removing an item created space - unblock a task that
					was waiting to send, yielding if it has a higher
					priority than this task. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
						{
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					traceQUEUE_PEEK( pxQueue );

					/* The data is not being removed, so reset the read
					pointer. */
					pxQueue->u.pcReadFrom = pcOriginalReadPosition;

					/* The data is being left in the queue, so see if there are
					any other tasks waiting for the data. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						/* Tasks that are removed from the event list will get added to
						the pending ready list as the scheduler is still suspended. */
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority than this task. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was empty and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was empty and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

				#if ( configUSE_MUTEXES == 1 )
				{
					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
					{
						/* About to block on a mutex - raise the priority of
						the current mutex holder so it can release the mutex
						promptly (priority inheritance). */
						taskENTER_CRITICAL();
						{
							vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
						}
						taskEXIT_CRITICAL();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif

				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
				prvUnlockQueue( pxQueue );
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Data arrived while preparing to block - try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The block time expired with the queue still empty. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();
			traceQUEUE_RECEIVE_FAILED( pxQueue );
			return errQUEUE_EMPTY;
		}
	}
}
1346 /*-----------------------------------------------------------*/\r
1347 \r
/*
 * Interrupt-safe version of xQueueReceive().  Never blocks.
 *
 * Returns pdPASS when an item was copied to pvBuffer, pdFAIL when the queue
 * was empty.  *pxHigherPriorityTaskWoken (optional, may be NULL) is set to
 * pdTRUE when receiving unblocked a higher priority task, in which case the
 * ISR should request a context switch before exiting.
 */
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call	interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

			prvCopyDataFromQueue( pxQueue, pvBuffer );
			--( pxQueue->uxMessagesWaiting );

			/* If the queue is locked the event list will not be modified.
			Instead update the lock count so the task that unlocks the queue
			will know that an ISR has removed data while the queue was
			locked. */
			if( pxQueue->xRxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority than us so
						force a context switch. */
						if( pxHigherPriorityTaskWoken != NULL )
						{
							*pxHigherPriorityTaskWoken = pdTRUE;
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was removed while it was locked. */
				++( pxQueue->xRxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			/* The queue was empty - nothing to receive. */
			xReturn = pdFAIL;
			traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
1433 /*-----------------------------------------------------------*/\r
1434 \r
1435 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,  void * const pvBuffer )\r
1436 {\r
1437 BaseType_t xReturn;\r
1438 UBaseType_t uxSavedInterruptStatus;\r
1439 int8_t *pcOriginalReadPosition;\r
1440 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1441 \r
1442         configASSERT( pxQueue );\r
1443         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1444 \r
1445         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1446         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1447         above the maximum system call priority are kept permanently enabled, even\r
1448         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1449         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1450         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1451         failure if a FreeRTOS API function is called from an interrupt that has been\r
1452         assigned a priority above the configured maximum system call priority.\r
1453         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1454         that have been assigned a priority at or (logically) below the maximum\r
1455         system call     interrupt priority.  FreeRTOS maintains a separate interrupt\r
1456         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1457         More information (albeit Cortex-M specific) is provided on the following\r
1458         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1459         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1460 \r
1461         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1462         {\r
1463                 /* Cannot block in an ISR, so check there is data available. */\r
1464                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1465                 {\r
1466                         traceQUEUE_PEEK_FROM_ISR( pxQueue );\r
1467 \r
1468                         /* Remember the read position so it can be reset as nothing is\r
1469                         actually being removed from the queue. */\r
1470                         pcOriginalReadPosition = pxQueue->u.pcReadFrom;\r
1471                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1472                         pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
1473 \r
1474                         xReturn = pdPASS;\r
1475                 }\r
1476                 else\r
1477                 {\r
1478                         xReturn = pdFAIL;\r
1479                         traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );\r
1480                 }\r
1481         }\r
1482         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1483 \r
1484         return xReturn;\r
1485 }\r
1486 /*-----------------------------------------------------------*/\r
1487 \r
1488 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )\r
1489 {\r
1490 UBaseType_t uxReturn;\r
1491 \r
1492         configASSERT( xQueue );\r
1493 \r
1494         taskENTER_CRITICAL();\r
1495         {\r
1496                 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1497         }\r
1498         taskEXIT_CRITICAL();\r
1499 \r
1500         return uxReturn;\r
1501 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1502 /*-----------------------------------------------------------*/\r
1503 \r
1504 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )\r
1505 {\r
1506 UBaseType_t uxReturn;\r
1507 Queue_t *pxQueue;\r
1508 \r
1509         pxQueue = ( Queue_t * ) xQueue;\r
1510         configASSERT( pxQueue );\r
1511 \r
1512         taskENTER_CRITICAL();\r
1513         {\r
1514                 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;\r
1515         }\r
1516         taskEXIT_CRITICAL();\r
1517 \r
1518         return uxReturn;\r
1519 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1520 /*-----------------------------------------------------------*/\r
1521 \r
1522 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )\r
1523 {\r
1524 UBaseType_t uxReturn;\r
1525 \r
1526         configASSERT( xQueue );\r
1527 \r
1528         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1529 \r
1530         return uxReturn;\r
1531 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1532 /*-----------------------------------------------------------*/\r
1533 \r
1534 void vQueueDelete( QueueHandle_t xQueue )\r
1535 {\r
1536 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1537 \r
1538         configASSERT( pxQueue );\r
1539 \r
1540         traceQUEUE_DELETE( pxQueue );\r
1541         #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
1542         {\r
1543                 vQueueUnregisterQueue( pxQueue );\r
1544         }\r
1545         #endif\r
1546         if( pxQueue->pcHead != NULL )\r
1547         {\r
1548                 vPortFree( pxQueue->pcHead );\r
1549         }\r
1550         vPortFree( pxQueue );\r
1551 }\r
1552 /*-----------------------------------------------------------*/\r
1553 \r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Return the trace-facility number previously assigned to the queue
	with vQueueSetQueueNumber(). */
	UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		return pxQueue->uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
1562 /*-----------------------------------------------------------*/\r
1563 \r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Assign a trace-facility number to the queue so trace tools can
	identify it. */
	void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		pxQueue->uxQueueNumber = uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
1572 /*-----------------------------------------------------------*/\r
1573 \r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Return the type recorded when the queue was created (queue, mutex,
	semaphore, etc.) for use by trace tools. */
	uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		return pxQueue->ucQueueType;
	}

#endif /* configUSE_TRACE_FACILITY */
1582 /*-----------------------------------------------------------*/\r
1583 \r
/*
 * Copy pvItemToQueue into the queue storage area and increment the message
 * count.  Must only be called when the queue cannot be accessed
 * concurrently (from within a critical section or with the queue locked).
 *
 * xPosition selects where the item is written: queueSEND_TO_BACK copies to
 * the write pointer; queueSEND_TO_FRONT and queueOVERWRITE copy to the read
 * pointer (queueOVERWRITE additionally replaces the item already there).
 *
 * When the item size is zero nothing is copied; if the queue is a mutex the
 * call instead marks the mutex as released and disinherits any priority.
 */
static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				vTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
				pxQueue->pxMutexHolder = NULL;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
		/* Advance the write pointer, wrapping back to the start of the
		storage area when the end is passed. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize;
		if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		/* Sending to the front (or overwriting): write at the read pointer
		then move it backwards, wrapping to the last item slot if it falls
		before the start of the storage area. */
		( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		if( xPosition == queueOVERWRITE )
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--( pxQueue->uxMessagesWaiting );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

	/* One more item is now on the queue (for queueOVERWRITE this restores
	the count decremented above). */
	++( pxQueue->uxMessagesWaiting );
}
1652 /*-----------------------------------------------------------*/\r
1653 \r
1654 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )\r
1655 {\r
1656         if( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX )\r
1657         {\r
1658                 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
1659                 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */\r
1660                 {\r
1661                         pxQueue->u.pcReadFrom = pxQueue->pcHead;\r
1662                 }\r
1663                 else\r
1664                 {\r
1665                         mtCOVERAGE_TEST_MARKER();\r
1666                 }\r
1667                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */\r
1668         }\r
1669         else\r
1670         {\r
1671                 mtCOVERAGE_TEST_MARKER();\r
1672         }\r
1673 }\r
1674 /*-----------------------------------------------------------*/\r
1675 \r
/* Perform the event-list processing that was deferred while the queue was
locked.  While a queue is locked, items can still be added to or removed from
it, but its event lists cannot be updated - the Tx/Rx lock counts record how
many such deferred updates are pending.  Drains both counts and returns both
locks to the queueUNLOCKED state. */
static void prvUnlockQueue( Queue_t * const pxQueue )
{
	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

	/* The lock counts contains the number of extra data items placed or
	removed from the queue while the queue was locked.  When a queue is
	locked items can be added or removed, but the event lists cannot be
	updated. */
	taskENTER_CRITICAL();
	{
		/* See if data was added to the queue while it was locked. */
		while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
		{
			/* Data was posted while the queue was locked.  Are any tasks
			blocked waiting for data to become available? */
			#if ( configUSE_QUEUE_SETS == 1 )
			{
				if( pxQueue->pxQueueSetContainer != NULL )
				{
					if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
					{
						/* The queue is a member of a queue set, and posting to
						the queue set caused a higher priority task to unblock.
						A context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* Tasks that are removed from the event list will get added to
					the pending ready list as the scheduler is still suspended. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							vTaskMissedYield();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* No tasks are waiting to receive - no more deferred
						processing is required however many items were posted. */
						break;
					}
				}
			}
			#else /* configUSE_QUEUE_SETS */
			{
				/* Tasks that are removed from the event list will get added to
				the pending ready list as the scheduler is still suspended. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority so record that a
						context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* No tasks are waiting to receive - nothing more to do. */
					break;
				}
			}
			#endif /* configUSE_QUEUE_SETS */

			/* One deferred post has been processed - at most one waiting task
			is unblocked per item that was posted while the queue was locked. */
			--( pxQueue->xTxLock );
		}

		pxQueue->xTxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();

	/* Do the same for the Rx lock. */
	taskENTER_CRITICAL();
	{
		while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
		{
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					/* The unblocked task has a higher priority - record that a
					context switch is required (the scheduler is suspended, so
					an immediate yield is not possible). */
					vTaskMissedYield();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				--( pxQueue->xRxLock );
			}
			else
			{
				/* No tasks are waiting to send - nothing more to do. */
				break;
			}
		}

		pxQueue->xRxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();
}
1789 /*-----------------------------------------------------------*/\r
1790 \r
1791 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )\r
1792 {\r
1793 BaseType_t xReturn;\r
1794 \r
1795         taskENTER_CRITICAL();\r
1796         {\r
1797                 if( pxQueue->uxMessagesWaiting == ( UBaseType_t )  0 )\r
1798                 {\r
1799                         xReturn = pdTRUE;\r
1800                 }\r
1801                 else\r
1802                 {\r
1803                         xReturn = pdFALSE;\r
1804                 }\r
1805         }\r
1806         taskEXIT_CRITICAL();\r
1807 \r
1808         return xReturn;\r
1809 }\r
1810 /*-----------------------------------------------------------*/\r
1811 \r
1812 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )\r
1813 {\r
1814 BaseType_t xReturn;\r
1815 \r
1816         configASSERT( xQueue );\r
1817         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
1818         {\r
1819                 xReturn = pdTRUE;\r
1820         }\r
1821         else\r
1822         {\r
1823                 xReturn = pdFALSE;\r
1824         }\r
1825 \r
1826         return xReturn;\r
1827 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
1828 /*-----------------------------------------------------------*/\r
1829 \r
1830 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )\r
1831 {\r
1832 BaseType_t xReturn;\r
1833 \r
1834         taskENTER_CRITICAL();\r
1835         {\r
1836                 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )\r
1837                 {\r
1838                         xReturn = pdTRUE;\r
1839                 }\r
1840                 else\r
1841                 {\r
1842                         xReturn = pdFALSE;\r
1843                 }\r
1844         }\r
1845         taskEXIT_CRITICAL();\r
1846 \r
1847         return xReturn;\r
1848 }\r
1849 /*-----------------------------------------------------------*/\r
1850 \r
1851 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )\r
1852 {\r
1853 BaseType_t xReturn;\r
1854 \r
1855         configASSERT( xQueue );\r
1856         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )\r
1857         {\r
1858                 xReturn = pdTRUE;\r
1859         }\r
1860         else\r
1861         {\r
1862                 xReturn = pdFALSE;\r
1863         }\r
1864 \r
1865         return xReturn;\r
1866 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
1867 /*-----------------------------------------------------------*/\r
1868 \r
#if ( configUSE_CO_ROUTINES == 1 )

	/* Co-routine only version of a queue send.  A co-routine cannot block
	inside an API function, so when the queue is full and a non-zero block
	time is given the co-routine is added to the queue's event list and
	errQUEUE_BLOCKED is returned - the caller is expected to perform the
	actual blocking.  Returns pdPASS if the item was queued, errQUEUE_YIELD
	if queueing it also unblocked a higher priority co-routine, and
	errQUEUE_FULL if the item could not be queued. */
	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a coroutine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		/* Interrupts were briefly enabled above, so the amount of space in the
		queue may have changed - it is deliberately re-tested below inside a
		fresh critical section before the data is copied. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
1944 /*-----------------------------------------------------------*/\r
1945 \r
#if ( configUSE_CO_ROUTINES == 1 )

	/* Co-routine only version of a queue receive.  A co-routine cannot block
	inside an API function, so when the queue is empty and a non-zero block
	time is given the co-routine is placed on the queue's event list and
	errQUEUE_BLOCKED is returned - the caller is expected to perform the
	actual blocking.  Returns pdPASS when an item was copied into pvBuffer,
	errQUEUE_YIELD when doing so also unblocked a higher priority co-routine,
	and errQUEUE_FULL/pdFAIL when nothing was received. */
	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		/* Interrupts were briefly enabled above, so the queue is re-tested for
		data inside a fresh critical section before anything is removed. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );

				/* Cast the length to size_t (not unsigned) - size_t is the type
				of memcpy()'s third parameter and the cast used elsewhere in
				this file; unsigned int can be narrower than size_t on some
				ports. */
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
2034 /*-----------------------------------------------------------*/\r
2035 \r
#if ( configUSE_CO_ROUTINES == 1 )

	/* ISR-safe, co-routine only version of a queue send.  Cannot block, so
	the item is simply not posted when the queue is full.  Returns pdTRUE if
	a waiting co-routine was moved to the pending ready list, otherwise the
	xCoRoutinePreviouslyWoken value is passed straight back - at most one
	co-routine is woken per ISR. */
	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* No space and no way to wait for it from an ISR - leave without
		doing anything. */
		if( pxQueue->uxMessagesWaiting >= pxQueue->uxLength )
		{
			mtCOVERAGE_TEST_MARKER();
			return xCoRoutinePreviouslyWoken;
		}

		prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

		/* Only one co-routine may be woken per ISR - if one was already woken
		the event lists are left untouched. */
		if( xCoRoutinePreviouslyWoken != pdFALSE )
		{
			mtCOVERAGE_TEST_MARKER();
			return xCoRoutinePreviouslyWoken;
		}

		if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
		{
			/* No co-routines are waiting for data. */
			mtCOVERAGE_TEST_MARKER();
			return xCoRoutinePreviouslyWoken;
		}

		if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
		{
			/* A waiting co-routine was moved to the pending ready list. */
			return pdTRUE;
		}

		mtCOVERAGE_TEST_MARKER();
		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
2082 /*-----------------------------------------------------------*/\r
2083 \r
#if ( configUSE_CO_ROUTINES == 1 )

	/* ISR-safe, co-routine only version of a queue receive.  Cannot block, so
	returns pdFAIL immediately when the queue is empty and pdPASS when an item
	was copied into pvBuffer.  *pxCoRoutineWoken is set to pdTRUE if a
	co-routine that was waiting for space is moved to the pending ready list -
	at most one co-routine is woken per ISR. */
	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available. If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );

			/* Cast the length to size_t (not unsigned) - size_t is the type of
			memcpy()'s third parameter and the cast used elsewhere in this
			file; unsigned int can be narrower than size_t on some ports. */
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
2142 /*-----------------------------------------------------------*/\r
2143 \r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* Record xQueue/pcQueueName in the first free registry slot.  A slot is
	free when its pcQueueName member is NULL.  If every slot is in use the
	queue is silently not added. */
	void vQueueAddToRegistry( QueueHandle_t xQueue, char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t uxIndex;

		for( uxIndex = ( UBaseType_t ) 0U; uxIndex < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; uxIndex++ )
		{
			if( xQueueRegistry[ uxIndex ].pcQueueName != NULL )
			{
				/* Slot already in use - keep searching. */
				mtCOVERAGE_TEST_MARKER();
			}
			else
			{
				/* Free slot found - store the information on this queue. */
				xQueueRegistry[ uxIndex ].pcQueueName = pcQueueName;
				xQueueRegistry[ uxIndex ].xHandle = xQueue;
				break;
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
2169 /*-----------------------------------------------------------*/\r
2170 \r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* Remove xQueue from the registry by NULLing the name pointer of the slot
	that holds its handle, marking the slot free.  Handles that were never
	registered are silently ignored. */
	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t uxIndex;

		for( uxIndex = ( UBaseType_t ) 0U; uxIndex < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; uxIndex++ )
		{
			if( xQueueRegistry[ uxIndex ].xHandle != xQueue )
			{
				/* Not the handle being searched for. */
				mtCOVERAGE_TEST_MARKER();
			}
			else
			{
				/* Setting the name to NULL marks this slot as free again. */
				xQueueRegistry[ uxIndex ].pcQueueName = NULL;
				break;
			}
		}

	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
2196 /*-----------------------------------------------------------*/\r
2197 \r
#if ( configUSE_TIMERS == 1 )

	/* Kernel-internal helper: place the calling task on xQueue's "waiting to
	receive" event list for up to xTicksToWait ticks, but only if the queue
	is currently empty.  See the notes below for the special calling
	requirements - this is not part of the public API. */
	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		/* Unlocking performs any event list processing that was deferred
		while the queue was locked. */
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
2232 /*-----------------------------------------------------------*/\r
2233 \r
#if ( configUSE_QUEUE_SETS == 1 )

	/* Create a queue set that can hold uxEventQueueLength events.  A queue
	set is implemented as a standard queue whose items are the handles of the
	member queues/semaphores that contain data.  Returns NULL-able handle as
	produced by xQueueGenericCreate(). */
	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
		return xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
	}

#endif /* configUSE_QUEUE_SETS */
2246 /*-----------------------------------------------------------*/\r
2247 \r
#if ( configUSE_QUEUE_SETS == 1 )

	/* Add a queue or semaphore to a queue set.  Fails (pdFAIL) if the member
	already belongs to a set, or if it already contains items - otherwise the
	container is recorded inside a critical section and pdPASS is returned.
	The cast is cached in a const local for consistency with
	xQueueRemoveFromSet(). */
	BaseType_t xQueueAddToSet( QueueSetMember_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != NULL )
		{
			/* Cannot add a queue/semaphore to more than one queue set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* Cannot add a queue/semaphore to a queue set if there are already
			items in the queue/semaphore. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				pxQueueOrSemaphore->pxQueueSetContainer = xQueueSet;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
2279 /*-----------------------------------------------------------*/\r
2280 \r
#if ( configUSE_QUEUE_SETS == 1 )

	/* Remove a queue or semaphore from a queue set.  Fails (pdFAIL) if the
	member does not belong to xQueueSet, or if it still contains items -
	otherwise the container pointer is cleared inside a critical section and
	pdPASS is returned. */
	BaseType_t xQueueRemoveFromSet( QueueSetMember_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			return pdFAIL;
		}

		if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			return pdFAIL;
		}

		taskENTER_CRITICAL();
		{
			/* The queue is no longer contained in the set. */
			pxQueueOrSemaphore->pxQueueSetContainer = NULL;
		}
		taskEXIT_CRITICAL();

		return pdPASS;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
2315 /*-----------------------------------------------------------*/\r
2316 \r
#if ( configUSE_QUEUE_SETS == 1 )

	/* Block on the queue set for up to xBlockTimeTicks.  A queue set is
	itself a queue of member handles, so receiving from it yields the handle
	of a member that contains data, or NULL if the block time expires with no
	event received. */
	QueueSetMember_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xBlockTimeTicks )
	{
	QueueSetMember_t xMemberWithData = NULL;

		( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xMemberWithData, xBlockTimeTicks, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xMemberWithData;
	}

#endif /* configUSE_QUEUE_SETS */
2328 /*-----------------------------------------------------------*/\r
2329 \r
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMember_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMember_t xMemberHandle = NULL;

		/* Interrupt-safe version of xQueueSelectFromSet():  perform a
		non-blocking receive on the set's underlying queue to obtain the
		handle of a member that contains data.  No "higher priority task
		woken" information is needed here, hence the NULL third argument.
		The receive's return value is discarded - on failure xMemberHandle
		remains NULL, which the caller tests for. */
		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xMemberHandle, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */

		return xMemberHandle;
	}

#endif /* configUSE_QUEUE_SETS */
2341 /*-----------------------------------------------------------*/\r
2342 \r
#if ( configUSE_QUEUE_SETS == 1 )

	/* Posts the handle of pxQueue (a queue/semaphore that has just received
	data) onto the queue set that contains it, and unblocks any task waiting
	on the set.  Returns pdTRUE if a task was unblocked whose priority is
	higher than that of the currently running task (the caller should then
	request a context switch), otherwise pdFALSE.

	NOTE(review): this is only called from within the queue send paths, which
	appear to hold a critical section or have interrupts masked - confirm at
	each call site, as nothing here protects the set's structures itself. */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* The caller must only invoke this when the queue is actually a
		member of a set, and the set (being a queue of member handles sized
		to the number of member events) should never be full here. */
		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );
			/* The data copied is the handle of the queue that contains data
			(&pxQueue points at the local pointer variable, and the set's
			item size is the size of a queue handle). */
			prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
			if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
			{
				/* A task is blocked on the set - wake it, and report back
				whether it should preempt the current task. */
				if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
				{
					/* The task waiting has a higher priority */
					xReturn = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			/* Set unexpectedly full - the event is silently dropped (the
			configASSERT above will have fired in a debug build). */
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
2384 \r
2385 \r
2386 \r
2387 \r
2388 \r
2389 \r
2390 \r
2391 \r
2392 \r
2393 \r
2394 \r
2395 \r