/*
    FreeRTOS V8.2.1 - Copyright (C) 2015 Real Time Engineers Ltd.
    All rights reserved

    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

    This file is part of the FreeRTOS distribution.

    FreeRTOS is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License (version 2) as published by the
    Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.

    ***************************************************************************
    >>!   NOTE: The modification to the GPL is included to allow you to     !<<
    >>!   distribute a combined work that includes FreeRTOS without being   !<<
    >>!   obliged to provide the source code for proprietary components     !<<
    >>!   outside of the FreeRTOS kernel.                                   !<<
    ***************************************************************************

    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE.  Full license text is available on the following
    link: http://www.freertos.org/a00114.html

    ***************************************************************************
     *                                                                       *
     *    FreeRTOS provides completely free yet professionally developed,    *
     *    robust, strictly quality controlled, supported, and cross          *
     *    platform software that is more than just the market leader, it     *
     *    is the industry's de facto standard.                               *
     *                                                                       *
     *    Help yourself get started quickly while simultaneously helping     *
     *    to support the FreeRTOS project by purchasing a FreeRTOS           *
     *    tutorial book, reference manual, or both:                          *
     *    http://www.FreeRTOS.org/Documentation                              *
     *                                                                       *
    ***************************************************************************

    http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
    the FAQ page "My application does not run, what could be wrong?".  Have you
    defined configASSERT()?

    http://www.FreeRTOS.org/support - In return for receiving this top quality
    embedded software for free we request you assist our global community by
    participating in the support forum.

    http://www.FreeRTOS.org/training - Investing in training allows your team to
    be as productive as possible as early as possible.  Now you can receive
    FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
    Ltd, and the world's leading authority on the world's leading RTOS.

    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
    compatible FAT file system, and our tiny thread aware UDP/IP stack.

    http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
    Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.

    http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
    Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
    licenses offer ticketed support, indemnification and commercial middleware.

    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
    engineered and independently SIL3 certified version for use in safety and
    mission critical applications that require provable dependability.

    1 tab == 4 spaces!
*/

#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */


/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED                       ( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED              ( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder                       pcTail
#define uxQueueType                         pcHead
#define queueQUEUE_IS_MUTEX                 NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH    ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME          ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
    int8_t *pcHead;                 /*< Points to the beginning of the queue storage area. */
    int8_t *pcTail;                 /*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this byte is used as a marker. */
    int8_t *pcWriteTo;              /*< Points to the next free place in the storage area. */

    union                           /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
    {
        int8_t *pcReadFrom;         /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
        UBaseType_t uxRecursiveCallCount; /*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
    } u;

    List_t xTasksWaitingToSend;     /*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;  /*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting; /*< The number of items currently in the queue. */
    UBaseType_t uxLength;           /*< The length of the queue, defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;         /*< The size of each item that the queue will hold. */

    volatile BaseType_t xRxLock;    /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile BaseType_t xTxLock;    /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition *pxQueueSetContainer;
    #endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;
/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to
    the new QueueRegistryItem_t name below to enable the use of older kernel
    aware debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
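
/*
 * Illustrative usage sketch (not part of the kernel source): a queue or
 * semaphore can be given a human readable name so a kernel aware debugger
 * can display it.  vQueueAddToRegistry() is only available when
 * configQUEUE_REGISTRY_SIZE is greater than zero.
 *
 *     QueueHandle_t xQueue = xQueueCreate( 5, sizeof( uint32_t ) );
 *
 *     if( xQueue != NULL )
 *     {
 *         // Only the string's pointer is stored, so the name must remain
 *         // valid for as long as the queue is registered.
 *         vQueueAddToRegistry( xQueue, "RxQueue" );
 *     }
 */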

/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
    /*
     * Checks to see if a queue is a member of a queue set, and if so, notifies
     * the queue set that the queue contains data.
     */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                                 \
    taskENTER_CRITICAL();                                       \
    {                                                           \
        if( ( pxQueue )->xRxLock == queueUNLOCKED )             \
        {                                                       \
            ( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;      \
        }                                                       \
        if( ( pxQueue )->xTxLock == queueUNLOCKED )             \
        {                                                       \
            ( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;      \
        }                                                       \
    }                                                           \
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
        pxQueue->xRxLock = queueUNLOCKED;
        pxQueue->xTxLock = queueUNLOCKED;

        if( xNewQueue == pdFALSE )
        {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                {
                    queueYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Ensure the event queues start in the correct state. */
            vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
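
/*
 * Illustrative usage sketch (not part of the kernel source): application code
 * normally resets a queue through the xQueueReset() macro, which calls
 * xQueueGenericReset() with xNewQueue set to pdFALSE so that tasks blocked on
 * the queue are handled as described above.  Assumes an existing handle
 * xQueue.
 *
 *     // Discard anything left in the queue and return it to its empty state.
 *     ( void ) xQueueReset( xQueue );
 */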
/*-----------------------------------------------------------*/

QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
QueueHandle_t xReturn = NULL;

    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* There is not going to be a queue storage area. */
        xQueueSizeInBytes = ( size_t ) 0;
    }
    else
    {
        /* The queue is one byte longer than asked for to make wrap checking
        easier/faster. */
        xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
    }

    /* Allocate the new queue structure and storage area. */
    pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

    if( pxNewQueue != NULL )
    {
        if( uxItemSize == ( UBaseType_t ) 0 )
        {
            /* No RAM was allocated for the queue storage area, but pcHead
            cannot be set to NULL because NULL is used as a key to say the queue
            is used as a mutex.  Therefore just set pcHead to point to the queue
            as a benign value that is known to be within the memory map. */
            pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
        }
        else
        {
            /* Jump past the queue structure to find the location of the queue
            storage area. */
            pxNewQueue->pcHead = ( ( int8_t * ) pxNewQueue ) + sizeof( Queue_t );
        }

        /* Initialise the queue members as described above where the queue type
        is defined. */
        pxNewQueue->uxLength = uxQueueLength;
        pxNewQueue->uxItemSize = uxItemSize;
        ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

        #if ( configUSE_TRACE_FACILITY == 1 )
        {
            pxNewQueue->ucQueueType = ucQueueType;
        }
        #endif /* configUSE_TRACE_FACILITY */

        #if( configUSE_QUEUE_SETS == 1 )
        {
            pxNewQueue->pxQueueSetContainer = NULL;
        }
        #endif /* configUSE_QUEUE_SETS */

        traceQUEUE_CREATE( pxNewQueue );
        xReturn = pxNewQueue;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    configASSERT( xReturn );

    return xReturn;
}
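
/*
 * Illustrative usage sketch (not part of the kernel source): applications
 * normally reach this function through the xQueueCreate() macro defined in
 * queue.h.
 *
 *     // A queue capable of holding ten uint32_t values.  The Queue_t
 *     // structure and the storage area are obtained from the FreeRTOS heap
 *     // in a single pvPortMalloc() call, as shown above.
 *     QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );
 *
 *     if( xQueue == NULL )
 *     {
 *         // There was insufficient FreeRTOS heap available.
 *     }
 */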
/*-----------------------------------------------------------*/

#if ( configUSE_MUTEXES == 1 )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        /* Allocate the new queue structure. */
        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
        if( pxNewQueue != NULL )
        {
            /* Information required for priority inheritance. */
            pxNewQueue->pxMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* When a queue is used as a mutex no data is actually copied into
            or out of the queue. */
            pxNewQueue->pcWriteTo = NULL;
            pxNewQueue->u.pcReadFrom = NULL;

            /* Each mutex has a length of 1 (like a binary semaphore) and
            an item size of 0 as nothing is actually copied into or out
            of the mutex. */
            pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
            pxNewQueue->uxLength = ( UBaseType_t ) 1U;
            pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
            pxNewQueue->xRxLock = queueUNLOCKED;
            pxNewQueue->xTxLock = queueUNLOCKED;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                pxNewQueue->ucQueueType = ucQueueType;
            }
            #endif

            #if ( configUSE_QUEUE_SETS == 1 )
            {
                pxNewQueue->pxQueueSetContainer = NULL;
            }
            #endif

            /* Ensure the event queues start with the correct state. */
            vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }

        configASSERT( pxNewQueue );
        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
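
/*
 * Illustrative usage sketch (not part of the kernel source): applications
 * create and use mutexes through the semphr.h API rather than calling
 * xQueueCreateMutex() directly.
 *
 *     SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();
 *
 *     if( xMutex != NULL )
 *     {
 *         if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 10 ) ) == pdPASS )
 *         {
 *             // Access the protected resource, then give the mutex back.
 *             xSemaphoreGive( xMutex );
 *         }
 *     }
 */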
/*-----------------------------------------------------------*/

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
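
/*
 * Illustrative usage sketch (not part of the kernel source): as the comment
 * above notes, the result is only dependable when asking "am I the holder?".
 * Assumes INCLUDE_xSemaphoreGetMutexHolder and
 * INCLUDE_xTaskGetCurrentTaskHandle are both set to 1 in FreeRTOSConfig.h.
 *
 *     if( xSemaphoreGetMutexHolder( xMutex ) == xTaskGetCurrentTaskHandle() )
 *     {
 *         // The calling task currently holds the mutex.
 *     }
 */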
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then pxMutexHolder will not
        change outside of this task.  If this task does not hold the mutex then
        pxMutexHolder can never coincidentally equal the task's handle, and as
        this is the only condition we are interested in it does not matter if
        pxMutexHolder is accessed simultaneously by another task.  Therefore no
        mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
            the task handle, therefore no underflow check is required.  Also,
            uxRecursiveCallCount is only modified by the mutex holder, and as
            there can only be one, no mutual exclusion is required to modify the
            uxRecursiveCallCount member. */
            ( pxMutex->u.uxRecursiveCallCount )--;

            /* Have we unwound the call count? */
            if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
            holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
        xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
        {
            ( pxMutex->u.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

            /* pdPASS will only be returned if the mutex was successfully
            obtained.  The calling task may have entered the Blocked state
            before reaching here. */
            if( xReturn == pdPASS )
            {
                ( pxMutex->u.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
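
/*
 * Illustrative usage sketch (not part of the kernel source): a recursive
 * mutex can be taken repeatedly by its holder - each take just increments
 * uxRecursiveCallCount - and must be given back once per successful take
 * before it becomes available to other tasks.  Applications use the semphr.h
 * wrappers rather than the functions above.
 *
 *     SemaphoreHandle_t xRecMutex = xSemaphoreCreateRecursiveMutex();
 *
 *     if( xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY ) == pdPASS )
 *     {
 *         // Taking again from the same task just bumps the call count.
 *         ( void ) xSemaphoreTakeRecursive( xRecMutex, ( TickType_t ) 0 );
 *
 *         // Each take must be balanced by a give.
 *         ( void ) xSemaphoreGiveRecursive( xRecMutex );
 *         ( void ) xSemaphoreGiveRecursive( xRecMutex );
 *     }
 */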
/*-----------------------------------------------------------*/

#if ( configUSE_COUNTING_SEMAPHORES == 1 )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        configASSERT( xHandle );
        return xHandle;
    }

#endif /* configUSE_COUNTING_SEMAPHORES */
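
/*
 * Illustrative usage sketch (not part of the kernel source): a counting
 * semaphore is a queue of zero-sized items, so its count is simply the
 * uxMessagesWaiting member set above.  Applications create one through
 * semphr.h.
 *
 *     // A semaphore that can count up to 5, starting at 0 - for example to
 *     // latch up to five interrupt events for deferred processing.
 *     SemaphoreHandle_t xCounting = xSemaphoreCreateCounting( 5, 0 );
 *
 *     // Each take decrements the count, blocking while it is zero.
 *     if( xSemaphoreTake( xCounting, portMAX_DELAY ) == pdPASS )
 *     {
 *         // Process one latched event.
 *     }
 */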
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif


    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
            highest priority task wanting to access the queue.  If the head item
            in the queue is to be overwritten then it does not matter if the
            queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );
                xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock. A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                            {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            /* Return to the original privilege level before exiting the
            function. */
            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    }
}
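
/*
 * Illustrative usage sketch (not part of the kernel source): applications
 * normally reach this function through the xQueueSend(), xQueueSendToBack(),
 * xQueueSendToFront() and xQueueOverwrite() macros, which differ only in the
 * xCopyPosition argument.  Assumes handles xQueue and xMailbox created
 * elsewhere, xMailbox with a length of one.
 *
 *     uint32_t ulValue = 42;
 *
 *     // Append to the back of the queue, blocking for at most 10 ticks if
 *     // the queue is full.
 *     if( xQueueSendToBack( xQueue, &ulValue, ( TickType_t ) 10 ) != pdPASS )
 *     {
 *         // The queue was still full after 10 ticks.
 *     }
 *
 *     // xQueueOverwrite() is intended for queues with a length of one, and
 *     // always succeeds because it overwrites any existing item.
 *     ( void ) xQueueOverwrite( xMailbox, &ulValue );
 */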
/*-----------------------------------------------------------*/

#if ( configUSE_ALTERNATIVE_API == 1 )

    BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
    {
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

        for( ;; )
        {
            taskENTER_CRITICAL();
            {
                /* Is there room on the queue now?  To be running we must be
                the highest priority task wanting to access the queue. */
                if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
                {
                    traceQUEUE_SEND( pxQueue );
                    prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately. */
                            portYIELD_WITHIN_API();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    taskEXIT_CRITICAL();
                    return pdPASS;
                }
                else
                {
                    if( xTicksToWait == ( TickType_t ) 0 )
                    {
                        taskEXIT_CRITICAL();
                        return errQUEUE_FULL;
                    }
                    else if( xEntryTimeSet == pdFALSE )
                    {
                        vTaskSetTimeOutState( &xTimeOut );
                        xEntryTimeSet = pdTRUE;
                    }
                }
            }
            taskEXIT_CRITICAL();

            taskENTER_CRITICAL();
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    if( prvIsQueueFull( pxQueue ) != pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
                        portYIELD_WITHIN_API();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    taskEXIT_CRITICAL();
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
            }
            taskEXIT_CRITICAL();
        }
    }

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

#if ( configUSE_ALTERNATIVE_API == 1 )

    BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
    {
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    int8_t *pcOriginalReadPosition;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

        for( ;; )
        {
            taskENTER_CRITICAL();
            {
                if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
                {
                    /* Remember our read position in case we are just peeking. */
                    pcOriginalReadPosition = pxQueue->u.pcReadFrom;

                    prvCopyDataFromQueue( pxQueue, pvBuffer );

                    if( xJustPeeking == pdFALSE )
                    {
                        traceQUEUE_RECEIVE( pxQueue );

                        /* Data is actually being removed (not just peeked). */
                        --( pxQueue->uxMessagesWaiting );

                        #if ( configUSE_MUTEXES == 1 )
                        {
                            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                            {
                                /* Record the information required to implement
                                priority inheritance should it become necessary. */
                                pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #endif

                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                            {
                                portYIELD_WITHIN_API();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                    }
                    else
                    {
                        traceQUEUE_PEEK( pxQueue );

                        /* The data is not being removed, so reset our read
                        pointer. */
                        pxQueue->u.pcReadFrom = pcOriginalReadPosition;

                        /* The data is being left in the queue, so see if there are
                        any other tasks waiting for the data. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            /* Tasks that are removed from the event list will get added to
                            the pending ready list as the scheduler is still suspended. */
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority than this task. */
                                portYIELD_WITHIN_API();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    taskEXIT_CRITICAL();
                    return pdPASS;
                }
                else
                {
                    if( xTicksToWait == ( TickType_t ) 0 )
                    {
                        taskEXIT_CRITICAL();
                        traceQUEUE_RECEIVE_FAILED( pxQueue );
                        return errQUEUE_EMPTY;
                    }
                    else if( xEntryTimeSet == pdFALSE )
                    {
                        vTaskSetTimeOutState( &xTimeOut );
                        xEntryTimeSet = pdTRUE;
                    }
                }
            }
            taskEXIT_CRITICAL();

            taskENTER_CRITICAL();
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                        #if ( configUSE_MUTEXES == 1 )
                        {
                            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                            {
                                taskENTER_CRITICAL();
                                {
                                    vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                                }
                                taskEXIT_CRITICAL();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #endif

                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                        portYIELD_WITHIN_API();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
            }
            taskEXIT_CRITICAL();
        }
    }

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
1056 {\r
1057 BaseType_t xReturn;\r
1058 UBaseType_t uxSavedInterruptStatus;\r
1059 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1060 \r
1061         configASSERT( pxQueue );\r
1062         configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1063         configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );\r
1064 \r
1065         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1066         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1067         above the maximum system call priority are kept permanently enabled, even\r
1068         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1069         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1070         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1071         failure if a FreeRTOS API function is called from an interrupt that has been\r
1072         assigned a priority above the configured maximum system call priority.\r
1073         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1074         that have been assigned a priority at or (logically) below the maximum\r
1075         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1076         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1077         More information (albeit Cortex-M specific) is provided on the following\r
1078         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1079         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1080 \r
1081         /* Similar to xQueueGenericSend, except without blocking if there is no room\r
1082         in the queue.  Also don't directly wake a task that was blocked on a queue\r
1083         read; instead, return a flag to say whether a context switch is required\r
1084         (i.e. whether a task with a higher priority than us has been woken by this\r
1085         post). */\r
1086         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1087         {\r
1088                 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )\r
1089                 {\r
1090                         traceQUEUE_SEND_FROM_ISR( pxQueue );\r
1091 \r
1092                         /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a\r
1093                         semaphore or mutex.  That means prvCopyDataToQueue() cannot result\r
1094                         in a task disinheriting a priority and prvCopyDataToQueue() can be\r
1095                         called here even though the disinherit function does not check if\r
1096                         the scheduler is suspended before accessing the ready lists. */\r
1097                         ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
1098 \r
1099                         /* The event list is not altered if the queue is locked.  This will\r
1100                         be done when the queue is unlocked later. */\r
1101                         if( pxQueue->xTxLock == queueUNLOCKED )\r
1102                         {\r
1103                                 #if ( configUSE_QUEUE_SETS == 1 )\r
1104                                 {\r
1105                                         if( pxQueue->pxQueueSetContainer != NULL )\r
1106                                         {\r
1107                                                 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )\r
1108                                                 {\r
1109                                                         /* The queue is a member of a queue set, and posting\r
1110                                                         to the queue set caused a higher priority task to\r
1111                                                         unblock.  A context switch is required. */\r
1112                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1113                                                         {\r
1114                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1115                                                         }\r
1116                                                         else\r
1117                                                         {\r
1118                                                                 mtCOVERAGE_TEST_MARKER();\r
1119                                                         }\r
1120                                                 }\r
1121                                                 else\r
1122                                                 {\r
1123                                                         mtCOVERAGE_TEST_MARKER();\r
1124                                                 }\r
1125                                         }\r
1126                                         else\r
1127                                         {\r
1128                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1129                                                 {\r
1130                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1131                                                         {\r
1132                                                                 /* The task waiting has a higher priority so\r
1133                                                                 record that a context switch is required. */\r
1134                                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1135                                                                 {\r
1136                                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1137                                                                 }\r
1138                                                                 else\r
1139                                                                 {\r
1140                                                                         mtCOVERAGE_TEST_MARKER();\r
1141                                                                 }\r
1142                                                         }\r
1143                                                         else\r
1144                                                         {\r
1145                                                                 mtCOVERAGE_TEST_MARKER();\r
1146                                                         }\r
1147                                                 }\r
1148                                                 else\r
1149                                                 {\r
1150                                                         mtCOVERAGE_TEST_MARKER();\r
1151                                                 }\r
1152                                         }\r
1153                                 }\r
1154                                 #else /* configUSE_QUEUE_SETS */\r
1155                                 {\r
1156                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1157                                         {\r
1158                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1159                                                 {\r
1160                                                         /* The task waiting has a higher priority so record that a\r
1161                                                         context switch is required. */\r
1162                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1163                                                         {\r
1164                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1165                                                         }\r
1166                                                         else\r
1167                                                         {\r
1168                                                                 mtCOVERAGE_TEST_MARKER();\r
1169                                                         }\r
1170                                                 }\r
1171                                                 else\r
1172                                                 {\r
1173                                                         mtCOVERAGE_TEST_MARKER();\r
1174                                                 }\r
1175                                         }\r
1176                                         else\r
1177                                         {\r
1178                                                 mtCOVERAGE_TEST_MARKER();\r
1179                                         }\r
1180                                 }\r
1181                                 #endif /* configUSE_QUEUE_SETS */\r
1182                         }\r
1183                         else\r
1184                         {\r
1185                                 /* Increment the lock count so the task that unlocks the queue\r
1186                                 knows that data was posted while it was locked. */\r
1187                                 ++( pxQueue->xTxLock );\r
1188                         }\r
1189 \r
1190                         xReturn = pdPASS;\r
1191                 }\r
1192                 else\r
1193                 {\r
1194                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1195                         xReturn = errQUEUE_FULL;\r
1196                 }\r
1197         }\r
1198         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1199 \r
1200         return xReturn;\r
1201 }\r
1202 /*-----------------------------------------------------------*/\r
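/* Editorial example - not part of queue.c.  A minimal sketch of how an\r
application might reach the function above, via the xQueueSendFromISR() macro\r
from queue.h, which expands to xQueueGenericSendFromISR() with\r
queueSEND_TO_BACK.  The handler name, the xRxQueue handle and the peripheral\r
access are hypothetical; the queue is assumed to have been created elsewhere\r
with an item size of sizeof( char ). */\r
extern QueueHandle_t xRxQueue;\r
\r
void vExampleRxISR( void )\r
{\r
char cRxedChar = 'a'; /* Stand-in for a byte read from a peripheral. */\r
BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
\r
        /* An ISR cannot block, so the FromISR API has no ticks-to-wait\r
        parameter; errQUEUE_FULL is returned if there is no room. */\r
        ( void ) xQueueSendFromISR( xRxQueue, &cRxedChar, &xHigherPriorityTaskWoken );\r
\r
        /* If the post unblocked a task of higher priority than the task that\r
        was interrupted, request a context switch before exiting the ISR.\r
        portYIELD_FROM_ISR() is used on the assumption the port provides it\r
        (Cortex-M ports do). */\r
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
}\r
/*-----------------------------------------------------------*/\r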
1203 \r
1204 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )\r
1205 {\r
1206 BaseType_t xReturn;\r
1207 UBaseType_t uxSavedInterruptStatus;\r
1208 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1209 \r
1210         /* Similar to xQueueGenericSendFromISR() but used with semaphores where the\r
1211         item size is 0.  Don't directly wake a task that was blocked on a queue\r
1212         read; instead, return a flag to say whether a context switch is required\r
1213         (i.e. whether a task with a higher priority than us has been woken by this\r
1214         post). */\r
1215 \r
1216         configASSERT( pxQueue );\r
1217 \r
1218         /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()\r
1219         if the item size is not 0. */\r
1220         configASSERT( pxQueue->uxItemSize == 0 );\r
1221 \r
1222         /* Normally a mutex would not be given from an interrupt, especially if\r
1223         there is a mutex holder, as priority inheritance makes no sense for an\r
1224         interrupt, only for a task. */\r
1225         configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );\r
1226 \r
1227         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1228         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1229         above the maximum system call priority are kept permanently enabled, even\r
1230         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1231         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1232         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1233         failure if a FreeRTOS API function is called from an interrupt that has been\r
1234         assigned a priority above the configured maximum system call priority.\r
1235         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1236         that have been assigned a priority at or (logically) below the maximum\r
1237         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1238         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1239         More information (albeit Cortex-M specific) is provided on the following\r
1240         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1241         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1242 \r
1243         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1244         {\r
1245                 /* When the queue is used to implement a semaphore, no data is ever\r
1246                 moved through the queue, but it is still valid to see if the queue\r
1247                 'has space'. */\r
1248                 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
1249                 {\r
1250                         traceQUEUE_SEND_FROM_ISR( pxQueue );\r
1251 \r
1252                         /* A task can only have an inherited priority if it is a mutex\r
1253                         holder - and if there is a mutex holder then the mutex cannot be\r
1254                         given from an ISR.  As this is the ISR version of the function it\r
1255                         can be assumed there is no mutex holder and no need to determine if\r
1256                         priority disinheritance is needed.  Simply increase the count of\r
1257                         messages (semaphores) available. */\r
1258                         ++( pxQueue->uxMessagesWaiting );\r
1259 \r
1260                         /* The event list is not altered if the queue is locked.  This will\r
1261                         be done when the queue is unlocked later. */\r
1262                         if( pxQueue->xTxLock == queueUNLOCKED )\r
1263                         {\r
1264                                 #if ( configUSE_QUEUE_SETS == 1 )\r
1265                                 {\r
1266                                         if( pxQueue->pxQueueSetContainer != NULL )\r
1267                                         {\r
1268                                                 if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )\r
1269                                                 {\r
1270                                                         /* The semaphore is a member of a queue set, and\r
1271                                                         posting to the queue set caused a higher priority\r
1272                                                         task to unblock.  A context switch is required. */\r
1273                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1274                                                         {\r
1275                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1276                                                         }\r
1277                                                         else\r
1278                                                         {\r
1279                                                                 mtCOVERAGE_TEST_MARKER();\r
1280                                                         }\r
1281                                                 }\r
1282                                                 else\r
1283                                                 {\r
1284                                                         mtCOVERAGE_TEST_MARKER();\r
1285                                                 }\r
1286                                         }\r
1287                                         else\r
1288                                         {\r
1289                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1290                                                 {\r
1291                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1292                                                         {\r
1293                                                                 /* The task waiting has a higher priority so\r
1294                                                                 record that a context switch is required. */\r
1295                                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1296                                                                 {\r
1297                                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1298                                                                 }\r
1299                                                                 else\r
1300                                                                 {\r
1301                                                                         mtCOVERAGE_TEST_MARKER();\r
1302                                                                 }\r
1303                                                         }\r
1304                                                         else\r
1305                                                         {\r
1306                                                                 mtCOVERAGE_TEST_MARKER();\r
1307                                                         }\r
1308                                                 }\r
1309                                                 else\r
1310                                                 {\r
1311                                                         mtCOVERAGE_TEST_MARKER();\r
1312                                                 }\r
1313                                         }\r
1314                                 }\r
1315                                 #else /* configUSE_QUEUE_SETS */\r
1316                                 {\r
1317                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1318                                         {\r
1319                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1320                                                 {\r
1321                                                         /* The task waiting has a higher priority so record that a\r
1322                                                         context switch is required. */\r
1323                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1324                                                         {\r
1325                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1326                                                         }\r
1327                                                         else\r
1328                                                         {\r
1329                                                                 mtCOVERAGE_TEST_MARKER();\r
1330                                                         }\r
1331                                                 }\r
1332                                                 else\r
1333                                                 {\r
1334                                                         mtCOVERAGE_TEST_MARKER();\r
1335                                                 }\r
1336                                         }\r
1337                                         else\r
1338                                         {\r
1339                                                 mtCOVERAGE_TEST_MARKER();\r
1340                                         }\r
1341                                 }\r
1342                                 #endif /* configUSE_QUEUE_SETS */\r
1343                         }\r
1344                         else\r
1345                         {\r
1346                                 /* Increment the lock count so the task that unlocks the queue\r
1347                                 knows that data was posted while it was locked. */\r
1348                                 ++( pxQueue->xTxLock );\r
1349                         }\r
1350 \r
1351                         xReturn = pdPASS;\r
1352                 }\r
1353                 else\r
1354                 {\r
1355                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1356                         xReturn = errQUEUE_FULL;\r
1357                 }\r
1358         }\r
1359         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1360 \r
1361         return xReturn;\r
1362 }\r
1363 /*-----------------------------------------------------------*/\r
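/* Editorial example - not part of queue.c.  A sketch showing the usual route\r
into the function above: xSemaphoreGiveFromISR(), defined in semphr.h (assumed\r
included), maps onto xQueueGiveFromISR() because a semaphore is a queue with\r
an item size of zero.  The handler and xBinarySemaphore names are\r
hypothetical. */\r
extern SemaphoreHandle_t xBinarySemaphore; /* Assumed created with xSemaphoreCreateBinary(). */\r
\r
void vExampleEndOfTransferISR( void )\r
{\r
BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
\r
        /* 'Give' the semaphore to unblock a deferred-processing task. */\r
        ( void ) xSemaphoreGiveFromISR( xBinarySemaphore, &xHigherPriorityTaskWoken );\r
\r
        /* Switch to the unblocked task on exit if it has the higher priority. */\r
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
}\r
/*-----------------------------------------------------------*/\r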
1364 \r
1365 BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )\r
1366 {\r
1367 BaseType_t xEntryTimeSet = pdFALSE;\r
1368 TimeOut_t xTimeOut;\r
1369 int8_t *pcOriginalReadPosition;\r
1370 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1371 \r
1372         configASSERT( pxQueue );\r
1373         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1374         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )\r
1375         {\r
1376                 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );\r
1377         }\r
1378         #endif\r
1379 \r
1380         /* This function relaxes the coding standard somewhat to allow return\r
1381         statements within the function itself.  This is done in the interest\r
1382         of execution time efficiency. */\r
1383 \r
1384         for( ;; )\r
1385         {\r
1386                 taskENTER_CRITICAL();\r
1387                 {\r
1388                         /* Is there data in the queue now?  To be running, the calling\r
1389                         task must be the highest priority task wanting to access the queue. */\r
1390                         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1391                         {\r
1392                                 /* Remember the read position in case the queue is only being\r
1393                                 peeked. */\r
1394                                 pcOriginalReadPosition = pxQueue->u.pcReadFrom;\r
1395 \r
1396                                 prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1397 \r
1398                                 if( xJustPeeking == pdFALSE )\r
1399                                 {\r
1400                                         traceQUEUE_RECEIVE( pxQueue );\r
1401 \r
1402                                         /* Actually removing data, not just peeking. */\r
1403                                         --( pxQueue->uxMessagesWaiting );\r
1404 \r
1405                                         #if ( configUSE_MUTEXES == 1 )\r
1406                                         {\r
1407                                                 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1408                                                 {\r
1409                                                         /* Record the information required to implement\r
1410                                                         priority inheritance should it become necessary. */\r
1411                                                         pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */\r
1412                                                 }\r
1413                                                 else\r
1414                                                 {\r
1415                                                         mtCOVERAGE_TEST_MARKER();\r
1416                                                 }\r
1417                                         }\r
1418                                         #endif /* configUSE_MUTEXES */\r
1419 \r
1420                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1421                                         {\r
1422                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )\r
1423                                                 {\r
1424                                                         queueYIELD_IF_USING_PREEMPTION();\r
1425                                                 }\r
1426                                                 else\r
1427                                                 {\r
1428                                                         mtCOVERAGE_TEST_MARKER();\r
1429                                                 }\r
1430                                         }\r
1431                                         else\r
1432                                         {\r
1433                                                 mtCOVERAGE_TEST_MARKER();\r
1434                                         }\r
1435                                 }\r
1436                                 else\r
1437                                 {\r
1438                                         traceQUEUE_PEEK( pxQueue );\r
1439 \r
1440                                         /* The data is not being removed, so reset the read\r
1441                                         pointer. */\r
1442                                         pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
1443 \r
1444                                         /* The data is being left in the queue, so see if there are\r
1445                                         any other tasks waiting for the data. */\r
1446                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1447                                         {\r
1448                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1449                                                 {\r
1450                                                         /* The task waiting has a higher priority than this task. */\r
1451                                                         queueYIELD_IF_USING_PREEMPTION();\r
1452                                                 }\r
1453                                                 else\r
1454                                                 {\r
1455                                                         mtCOVERAGE_TEST_MARKER();\r
1456                                                 }\r
1457                                         }\r
1458                                         else\r
1459                                         {\r
1460                                                 mtCOVERAGE_TEST_MARKER();\r
1461                                         }\r
1462                                 }\r
1463 \r
1464                                 taskEXIT_CRITICAL();\r
1465                                 return pdPASS;\r
1466                         }\r
1467                         else\r
1468                         {\r
1469                                 if( xTicksToWait == ( TickType_t ) 0 )\r
1470                                 {\r
1471                                         /* The queue was empty and no block time is specified (or\r
1472                                         the block time has expired) so leave now. */\r
1473                                         taskEXIT_CRITICAL();\r
1474                                         traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1475                                         return errQUEUE_EMPTY;\r
1476                                 }\r
1477                                 else if( xEntryTimeSet == pdFALSE )\r
1478                                 {\r
1479                                         /* The queue was empty and a block time was specified so\r
1480                                         configure the timeout structure. */\r
1481                                         vTaskSetTimeOutState( &xTimeOut );\r
1482                                         xEntryTimeSet = pdTRUE;\r
1483                                 }\r
1484                                 else\r
1485                                 {\r
1486                                         /* Entry time was already set. */\r
1487                                         mtCOVERAGE_TEST_MARKER();\r
1488                                 }\r
1489                         }\r
1490                 }\r
1491                 taskEXIT_CRITICAL();\r
1492 \r
1493                 /* Interrupts and other tasks can send to and receive from the queue\r
1494                 now that the critical section has been exited. */\r
1495 \r
1496                 vTaskSuspendAll();\r
1497                 prvLockQueue( pxQueue );\r
1498 \r
1499                 /* Update the timeout state to see if it has expired yet. */\r
1500                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )\r
1501                 {\r
1502                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1503                         {\r
1504                                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );\r
1505 \r
1506                                 #if ( configUSE_MUTEXES == 1 )\r
1507                                 {\r
1508                                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1509                                         {\r
1510                                                 taskENTER_CRITICAL();\r
1511                                                 {\r
1512                                                         vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );\r
1513                                                 }\r
1514                                                 taskEXIT_CRITICAL();\r
1515                                         }\r
1516                                         else\r
1517                                         {\r
1518                                                 mtCOVERAGE_TEST_MARKER();\r
1519                                         }\r
1520                                 }\r
1521                                 #endif\r
1522 \r
1523                                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );\r
1524                                 prvUnlockQueue( pxQueue );\r
1525                                 if( xTaskResumeAll() == pdFALSE )\r
1526                                 {\r
1527                                         portYIELD_WITHIN_API();\r
1528                                 }\r
1529                                 else\r
1530                                 {\r
1531                                         mtCOVERAGE_TEST_MARKER();\r
1532                                 }\r
1533                         }\r
1534                         else\r
1535                         {\r
1536                                 /* Try again. */\r
1537                                 prvUnlockQueue( pxQueue );\r
1538                                 ( void ) xTaskResumeAll();\r
1539                         }\r
1540                 }\r
1541                 else\r
1542                 {\r
1543                         prvUnlockQueue( pxQueue );\r
1544                         ( void ) xTaskResumeAll();\r
1545                         traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1546                         return errQUEUE_EMPTY;\r
1547                 }\r
1548         }\r
1549 }\r
1550 /*-----------------------------------------------------------*/\r
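/* Editorial example - not part of queue.c.  The xQueueReceive() and\r
xQueuePeek() macros in queue.h both expand to xQueueGenericReceive(),\r
differing only in the xJustPeeking argument (pdFALSE and pdTRUE respectively).\r
The task and xDataQueue names are hypothetical; the queue is assumed to hold\r
uint32_t items. */\r
extern QueueHandle_t xDataQueue;\r
\r
void vExampleConsumerTask( void *pvParameters )\r
{\r
uint32_t ulValue;\r
\r
        ( void ) pvParameters;\r
\r
        for( ;; )\r
        {\r
                /* Peek copies the front item out but leaves it on the queue... */\r
                if( xQueuePeek( xDataQueue, &ulValue, portMAX_DELAY ) == pdPASS )\r
                {\r
                        /* ...so this receive returns the same item and removes\r
                        it - unless another reader got in first, which is why\r
                        the return value should not be ignored in real code. */\r
                        ( void ) xQueueReceive( xDataQueue, &ulValue, 0 );\r
                }\r
        }\r
}\r
/*-----------------------------------------------------------*/\r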
1551 \r
1552 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )\r
1553 {\r
1554 BaseType_t xReturn;\r
1555 UBaseType_t uxSavedInterruptStatus;\r
1556 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1557 \r
1558         configASSERT( pxQueue );\r
1559         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1560 \r
1561         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1562         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1563         above the maximum system call priority are kept permanently enabled, even\r
1564         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1565         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1566         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1567         failure if a FreeRTOS API function is called from an interrupt that has been\r
1568         assigned a priority above the configured maximum system call priority.\r
1569         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1570         that have been assigned a priority at or (logically) below the maximum\r
1571         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1572         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1573         More information (albeit Cortex-M specific) is provided on the following\r
1574         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1575         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1576 \r
1577         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1578         {\r
1579                 /* Cannot block in an ISR, so check there is data available. */\r
1580                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1581                 {\r
1582                         traceQUEUE_RECEIVE_FROM_ISR( pxQueue );\r
1583 \r
1584                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1585                         --( pxQueue->uxMessagesWaiting );\r
1586 \r
1587                         /* If the queue is locked the event list will not be modified.\r
1588                         Instead update the lock count so the task that unlocks the queue\r
1589                         will know that an ISR has removed data while the queue was\r
1590                         locked. */\r
1591                         if( pxQueue->xRxLock == queueUNLOCKED )\r
1592                         {\r
1593                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1594                                 {\r
1595                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1596                                         {\r
1597                                                 /* The task waiting has a higher priority than us so\r
1598                                                 force a context switch. */\r
1599                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1600                                                 {\r
1601                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1602                                                 }\r
1603                                                 else\r
1604                                                 {\r
1605                                                         mtCOVERAGE_TEST_MARKER();\r
1606                                                 }\r
1607                                         }\r
1608                                         else\r
1609                                         {\r
1610                                                 mtCOVERAGE_TEST_MARKER();\r
1611                                         }\r
1612                                 }\r
1613                                 else\r
1614                                 {\r
1615                                         mtCOVERAGE_TEST_MARKER();\r
1616                                 }\r
1617                         }\r
1618                         else\r
1619                         {\r
1620                                 /* Increment the lock count so the task that unlocks the queue\r
1621                                 knows that data was removed while it was locked. */\r
1622                                 ++( pxQueue->xRxLock );\r
1623                         }\r
1624 \r
1625                         xReturn = pdPASS;\r
1626                 }\r
1627                 else\r
1628                 {\r
1629                         xReturn = pdFAIL;\r
1630                         traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );\r
1631                 }\r
1632         }\r
1633         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1634 \r
1635         return xReturn;\r
1636 }\r
1637 /*-----------------------------------------------------------*/\r
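/* Editorial example - not part of queue.c.  A sketch of a transmit interrupt\r
draining a queue with the function above.  The handler, the xTxQueue handle\r
and the peripheral write are hypothetical. */\r
extern QueueHandle_t xTxQueue; /* Assumed to hold single characters. */\r
\r
void vExampleTxISR( void )\r
{\r
char cCharToTx;\r
BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
\r
        /* pdFAIL is returned once the queue is empty - an ISR cannot block.\r
        Removing data can unblock a task waiting to send, hence the woken\r
        flag. */\r
        while( xQueueReceiveFromISR( xTxQueue, &cCharToTx, &xHigherPriorityTaskWoken ) == pdPASS )\r
        {\r
                /* cCharToTx would be written to the transmit register here. */\r
        }\r
\r
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
}\r
/*-----------------------------------------------------------*/\r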
1638 \r
1639 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )\r
1640 {\r
1641 BaseType_t xReturn;\r
1642 UBaseType_t uxSavedInterruptStatus;\r
1643 int8_t *pcOriginalReadPosition;\r
1644 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1645 \r
1646         configASSERT( pxQueue );\r
1647         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1648         configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */\r
1649 \r
1650         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1651         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1652         above the maximum system call priority are kept permanently enabled, even\r
1653         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1654         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1655         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1656         failure if a FreeRTOS API function is called from an interrupt that has been\r
1657         assigned a priority above the configured maximum system call priority.\r
1658         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1659         that have been assigned a priority at or (logically) below the maximum\r
1660         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1661         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1662         More information (albeit Cortex-M specific) is provided on the following\r
1663         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1664         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1665 \r
1666         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1667         {\r
1668                 /* Cannot block in an ISR, so check there is data available. */\r
1669                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1670                 {\r
1671                         traceQUEUE_PEEK_FROM_ISR( pxQueue );\r
1672 \r
1673                         /* Remember the read position so it can be reset as nothing is\r
1674                         actually being removed from the queue. */\r
1675                         pcOriginalReadPosition = pxQueue->u.pcReadFrom;\r
1676                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1677                         pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
1678 \r
1679                         xReturn = pdPASS;\r
1680                 }\r
1681                 else\r
1682                 {\r
1683                         xReturn = pdFAIL;\r
1684                         traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );\r
1685                 }\r
1686         }\r
1687         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1688 \r
1689         return xReturn;\r
1690 }\r
1691 /*-----------------------------------------------------------*/\r
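/* Editorial example - not part of queue.c.  Peeking from an ISR copies the\r
front item without removing it; note the function above takes no\r
pxHigherPriorityTaskWoken parameter because nothing is removed, so no task\r
waiting to send can be unblocked.  The handler and queue names are\r
hypothetical. */\r
extern QueueHandle_t xDataQueue;\r
\r
void vExampleInspectISR( void )\r
{\r
uint32_t ulNextValue;\r
\r
        if( xQueuePeekFromISR( xDataQueue, &ulNextValue ) == pdPASS )\r
        {\r
                /* ulNextValue holds a copy of the item that is still at the\r
                front of the queue. */\r
        }\r
}\r
/*-----------------------------------------------------------*/\r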
1692 \r
1693 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )\r
1694 {\r
1695 UBaseType_t uxReturn;\r
1696 \r
1697         configASSERT( xQueue );\r
1698 \r
1699         taskENTER_CRITICAL();\r
1700         {\r
1701                 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1702         }\r
1703         taskEXIT_CRITICAL();\r
1704 \r
1705         return uxReturn;\r
1706 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1707 /*-----------------------------------------------------------*/\r
1708 \r
1709 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )\r
1710 {\r
1711 UBaseType_t uxReturn;\r
1712 Queue_t *pxQueue;\r
1713 \r
1714         pxQueue = ( Queue_t * ) xQueue;\r
1715         configASSERT( pxQueue );\r
1716 \r
1717         taskENTER_CRITICAL();\r
1718         {\r
1719                 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;\r
1720         }\r
1721         taskEXIT_CRITICAL();\r
1722 \r
1723         return uxReturn;\r
1724 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1725 /*-----------------------------------------------------------*/\r
1726 \r
1727 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )\r
1728 {\r
1729 UBaseType_t uxReturn;\r
1730 \r
1731         configASSERT( xQueue );\r
1732 \r
1733         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1734 \r
1735         return uxReturn;\r
1736 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1737 /*-----------------------------------------------------------*/\r
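/* Editorial example - not part of queue.c.  For a queue created with length N,\r
uxQueueMessagesWaiting() and uxQueueSpacesAvailable() sum to N at the instant\r
each is sampled, although either value can change between the two calls if\r
other tasks or interrupts use the queue.  The queue name and length are\r
hypothetical. */\r
extern QueueHandle_t xDataQueue; /* Assumed created with a length of 10. */\r
\r
void vExampleQueryQueue( void )\r
{\r
UBaseType_t uxUsed, uxFree;\r
\r
        uxUsed = uxQueueMessagesWaiting( xDataQueue );\r
        uxFree = uxQueueSpacesAvailable( xDataQueue );\r
\r
        /* uxUsed + uxFree equals 10 here only if no send or receive occurred\r
        between the two snapshots.  From an ISR, use the\r
        uxQueueMessagesWaitingFromISR() variant above instead. */\r
        ( void ) uxUsed;\r
        ( void ) uxFree;\r
}\r
/*-----------------------------------------------------------*/\r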
1738 \r
1739 void vQueueDelete( QueueHandle_t xQueue )\r
1740 {\r
1741 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1742 \r
1743         configASSERT( pxQueue );\r
1744 \r
1745         traceQUEUE_DELETE( pxQueue );\r
1746         #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
1747         {\r
1748                 vQueueUnregisterQueue( pxQueue );\r
1749         }\r
1750         #endif\r
1751         vPortFree( pxQueue );\r
1752 }\r
1753 /*-----------------------------------------------------------*/\r
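/* Editorial example - not part of queue.c.  vQueueDelete() frees the queue\r
storage with vPortFree(), so a queue must not be deleted while any task is\r
blocked on it or could still use the handle.  A minimal sketch, with a\r
hypothetical queue name: */\r
extern QueueHandle_t xDataQueue;\r
\r
void vExampleShutdown( void )\r
{\r
        /* Assumes every producer and consumer of xDataQueue has already been\r
        stopped. */\r
        vQueueDelete( xDataQueue );\r
        xDataQueue = NULL; /* Guard against accidental reuse of the stale handle. */\r
}\r
/*-----------------------------------------------------------*/\r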
1754 \r
1755 #if ( configUSE_TRACE_FACILITY == 1 )\r
1756 \r
1757         UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )\r
1758         {\r
1759                 return ( ( Queue_t * ) xQueue )->uxQueueNumber;\r
1760         }\r
1761 \r
1762 #endif /* configUSE_TRACE_FACILITY */\r
1763 /*-----------------------------------------------------------*/\r
1764 \r
1765 #if ( configUSE_TRACE_FACILITY == 1 )\r
1766 \r
1767         void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )\r
1768         {\r
1769                 ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;\r
1770         }\r
1771 \r
1772 #endif /* configUSE_TRACE_FACILITY */\r
1773 /*-----------------------------------------------------------*/\r
1774 \r
1775 #if ( configUSE_TRACE_FACILITY == 1 )\r
1776 \r
1777         uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )\r
1778         {\r
1779                 return ( ( Queue_t * ) xQueue )->ucQueueType;\r
1780         }\r
1781 \r
1782 #endif /* configUSE_TRACE_FACILITY */\r
1783 /*-----------------------------------------------------------*/\r
1784 \r
1785 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )\r
1786 {\r
1787 BaseType_t xReturn = pdFALSE;\r
1788 \r
1789         if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )\r
1790         {\r
1791                 #if ( configUSE_MUTEXES == 1 )\r
1792                 {\r
1793                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1794                         {\r
1795                                 /* The mutex is no longer being held. */\r
1796                                 xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );\r
1797                                 pxQueue->pxMutexHolder = NULL;\r
1798                         }\r
1799                         else\r
1800                         {\r
1801                                 mtCOVERAGE_TEST_MARKER();\r
1802                         }\r
1803                 }\r
1804                 #endif /* configUSE_MUTEXES */\r
1805         }\r
1806         else if( xPosition == queueSEND_TO_BACK )\r
1807         {\r
1808                 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */\r
1809                 pxQueue->pcWriteTo += pxQueue->uxItemSize;\r
1810                 if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */\r
1811                 {\r
1812                         pxQueue->pcWriteTo = pxQueue->pcHead;\r
1813                 }\r
1814                 else\r
1815                 {\r
1816                         mtCOVERAGE_TEST_MARKER();\r
1817                 }\r
1818         }\r
1819         else\r
1820         {\r
1821                 ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
1822                 pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;\r
1823                 if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */\r
1824                 {\r
1825                         pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );\r
1826                 }\r
1827                 else\r
1828                 {\r
1829                         mtCOVERAGE_TEST_MARKER();\r
1830                 }\r
1831 \r
1832                 if( xPosition == queueOVERWRITE )\r
1833                 {\r
1834                         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1835                         {\r
1836                                 /* An item is not being added but overwritten, so subtract\r
1837                                 one from the recorded number of items in the queue so when\r
1838                                 one is added again below the number of recorded items remains\r
1839                                 correct. */\r
1840                                 --( pxQueue->uxMessagesWaiting );\r
1841                         }\r
1842                         else\r
1843                         {\r
1844                                 mtCOVERAGE_TEST_MARKER();\r
1845                         }\r
1846                 }\r
1847                 else\r
1848                 {\r
1849                         mtCOVERAGE_TEST_MARKER();\r
1850                 }\r
1851         }\r
1852 \r
1853         ++( pxQueue->uxMessagesWaiting );\r
1854 \r
1855         return xReturn;\r
1856 }\r
1857 /*-----------------------------------------------------------*/\r
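/* Editorial example - not part of queue.c.  The queueOVERWRITE branch above is\r
reached via the xQueueOverwrite() macro and is intended for queues of length\r
one that act as a "mailbox" always holding the latest value: the message count\r
is decremented before the unconditional increment at the end of\r
prvCopyDataToQueue(), so it stays at one.  A sketch: */\r
void vExampleMailbox( void )\r
{\r
QueueHandle_t xMailbox;\r
uint32_t ulValue = 10;\r
\r
        /* The length must be 1 when queueOVERWRITE is used - see the\r
        configASSERT() calls in the send functions. */\r
        xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );\r
\r
        if( xMailbox != NULL )\r
        {\r
                ( void ) xQueueOverwrite( xMailbox, &ulValue );\r
\r
                ulValue = 20;\r
                ( void ) xQueueOverwrite( xMailbox, &ulValue ); /* Replaces the 10. */\r
\r
                /* Reads back 20 - the first value was silently overwritten. */\r
                ( void ) xQueueReceive( xMailbox, &ulValue, 0 );\r
\r
                vQueueDelete( xMailbox );\r
        }\r
}\r
/*-----------------------------------------------------------*/\r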
1858 \r
1859 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )\r
1860 {\r
1861         if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )\r
1862         {\r
1863                 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
1864                 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */\r
1865                 {\r
1866                         pxQueue->u.pcReadFrom = pxQueue->pcHead;\r
1867                 }\r
1868                 else\r
1869                 {\r
1870                         mtCOVERAGE_TEST_MARKER();\r
1871                 }\r
1872                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */\r
1873         }\r
1874 }\r
1875 /*-----------------------------------------------------------*/\r
1876 \r
1877 static void prvUnlockQueue( Queue_t * const pxQueue )\r
1878 {\r
1879         /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */\r
1880 \r
1881         /* The lock counts contain the number of extra data items placed on or\r
1882         removed from the queue while the queue was locked.  When a queue is\r
1883         locked, items can be added or removed, but the event lists cannot be\r
1884         updated. */\r
1885         taskENTER_CRITICAL();\r
1886         {\r
1887                 /* See if data was added to the queue while it was locked. */\r
1888                 while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )\r
1889                 {\r
1890                         /* Data was posted while the queue was locked.  Are any tasks\r
1891                         blocked waiting for data to become available? */\r
1892                         #if ( configUSE_QUEUE_SETS == 1 )\r
1893                         {\r
1894                                 if( pxQueue->pxQueueSetContainer != NULL )\r
1895                                 {\r
1896                                         if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )\r
1897                                         {\r
1898                                                 /* The queue is a member of a queue set, and posting to\r
1899                                                 the queue set caused a higher priority task to unblock.\r
1900                                                 A context switch is required. */\r
1901                                                 vTaskMissedYield();\r
1902                                         }\r
1903                                         else\r
1904                                         {\r
1905                                                 mtCOVERAGE_TEST_MARKER();\r
1906                                         }\r
1907                                 }\r
1908                                 else\r
1909                                 {\r
1910                                         /* Tasks that are removed from the event list will get added to\r
1911                                         the pending ready list as the scheduler is still suspended. */\r
1912                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1913                                         {\r
1914                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1915                                                 {\r
1916                                                         /* The task waiting has a higher priority so record that a\r
1917                                                         context switch is required. */\r
1918                                                         vTaskMissedYield();\r
1919                                                 }\r
1920                                                 else\r
1921                                                 {\r
1922                                                         mtCOVERAGE_TEST_MARKER();\r
1923                                                 }\r
1924                                         }\r
1925                                         else\r
1926                                         {\r
1927                                                 break;\r
1928                                         }\r
1929                                 }\r
1930                         }\r
1931                         #else /* configUSE_QUEUE_SETS */\r
1932                         {\r
1933                                 /* Tasks that are removed from the event list will get added to\r
1934                                 the pending ready list as the scheduler is still suspended. */\r
1935                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1936                                 {\r
1937                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1938                                         {\r
1939                                                 /* The task waiting has a higher priority so record that a\r
1940                                                 context switch is required. */\r
1941                                                 vTaskMissedYield();\r
1942                                         }\r
1943                                         else\r
1944                                         {\r
1945                                                 mtCOVERAGE_TEST_MARKER();\r
1946                                         }\r
1947                                 }\r
1948                                 else\r
1949                                 {\r
1950                                         break;\r
1951                                 }\r
1952                         }\r
1953                         #endif /* configUSE_QUEUE_SETS */\r
1954 \r
1955                         --( pxQueue->xTxLock );\r
1956                 }\r
1957 \r
1958                 pxQueue->xTxLock = queueUNLOCKED;\r
1959         }\r
1960         taskEXIT_CRITICAL();\r
1961 \r
1962         /* Do the same for the Rx lock. */\r
1963         taskENTER_CRITICAL();\r
1964         {\r
1965                 while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )\r
1966                 {\r
1967                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1968                         {\r
1969                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1970                                 {\r
1971                                         vTaskMissedYield();\r
1972                                 }\r
1973                                 else\r
1974                                 {\r
1975                                         mtCOVERAGE_TEST_MARKER();\r
1976                                 }\r
1977 \r
1978                                 --( pxQueue->xRxLock );\r
1979                         }\r
1980                         else\r
1981                         {\r
1982                                 break;\r
1983                         }\r
1984                 }\r
1985 \r
1986                 pxQueue->xRxLock = queueUNLOCKED;\r
1987         }\r
1988         taskEXIT_CRITICAL();\r
1989 }\r
1990 /*-----------------------------------------------------------*/\r
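/* Illustrative sketch of how the lock counts drained above get set.  The\r
blocking queue functions use prvLockQueue()/prvUnlockQueue() around their\r
check-then-block sequence so ISRs can keep posting while the scheduler is\r
suspended.  A simplified view of the calling pattern, not verbatim kernel\r
code:\r
\r
        vTaskSuspendAll();\r
        prvLockQueue( pxQueue );\r
\r
        // Check the queue and, if necessary, place the calling task on the\r
        // relevant event list.  ISR sends/receives that occur in this window\r
        // only increment xTxLock/xRxLock rather than touching the event\r
        // lists.\r
\r
        prvUnlockQueue( pxQueue ); // Replays the ISR activity counted above.\r
        ( void ) xTaskResumeAll();\r
*/\r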
1991 \r
1992 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )\r
1993 {\r
1994 BaseType_t xReturn;\r
1995 \r
1996         taskENTER_CRITICAL();\r
1997         {\r
1998                 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
1999                 {\r
2000                         xReturn = pdTRUE;\r
2001                 }\r
2002                 else\r
2003                 {\r
2004                         xReturn = pdFALSE;\r
2005                 }\r
2006         }\r
2007         taskEXIT_CRITICAL();\r
2008 \r
2009         return xReturn;\r
2010 }\r
2011 /*-----------------------------------------------------------*/\r
2012 \r
2013 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )\r
2014 {\r
2015 BaseType_t xReturn;\r
2016 \r
2017         configASSERT( xQueue );\r
2018         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
2019         {\r
2020                 xReturn = pdTRUE;\r
2021         }\r
2022         else\r
2023         {\r
2024                 xReturn = pdFALSE;\r
2025         }\r
2026 \r
2027         return xReturn;\r
2028 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2029 /*-----------------------------------------------------------*/\r
2030 \r
2031 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )\r
2032 {\r
2033 BaseType_t xReturn;\r
2034 \r
2035         taskENTER_CRITICAL();\r
2036         {\r
2037                 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )\r
2038                 {\r
2039                         xReturn = pdTRUE;\r
2040                 }\r
2041                 else\r
2042                 {\r
2043                         xReturn = pdFALSE;\r
2044                 }\r
2045         }\r
2046         taskEXIT_CRITICAL();\r
2047 \r
2048         return xReturn;\r
2049 }\r
2050 /*-----------------------------------------------------------*/\r
2051 \r
2052 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )\r
2053 {\r
2054 BaseType_t xReturn;\r
2055 \r
2056         configASSERT( xQueue );\r
2057         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )\r
2058         {\r
2059                 xReturn = pdTRUE;\r
2060         }\r
2061         else\r
2062         {\r
2063                 xReturn = pdFALSE;\r
2064         }\r
2065 \r
2066         return xReturn;\r
2067 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2068 /*-----------------------------------------------------------*/\r
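/* Illustrative use of the two ISR-safe predicates above (sketch only - the\r
ISR and the xRxQueue/xTxQueue handles are hypothetical application code).\r
Note the FromISR variants read uxMessagesWaiting without entering a critical\r
section, so the answer is only a snapshot:\r
\r
        void vCommsISR( void )\r
        {\r
                if( xQueueIsQueueEmptyFromISR( xRxQueue ) == pdFALSE )\r
                {\r
                        // At least one item was queued when the check was\r
                        // made.\r
                }\r
\r
                if( xQueueIsQueueFullFromISR( xTxQueue ) == pdTRUE )\r
                {\r
                        // A post now would be rejected, so don't attempt it.\r
                }\r
        }\r
*/\r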
2069 \r
2070 #if ( configUSE_CO_ROUTINES == 1 )\r
2071 \r
2072         BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )\r
2073         {\r
2074         BaseType_t xReturn;\r
2075         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2076 \r
2077                 /* If the queue is already full we may have to block.  A critical section\r
2078                 is required to prevent an interrupt removing something from the queue\r
2079                 between the check to see if the queue is full and blocking on the queue. */\r
2080                 portDISABLE_INTERRUPTS();\r
2081                 {\r
2082                         if( prvIsQueueFull( pxQueue ) != pdFALSE )\r
2083                         {\r
2084                                 /* The queue is full - do we want to block or just leave without\r
2085                                 posting? */\r
2086                                 if( xTicksToWait > ( TickType_t ) 0 )\r
2087                                 {\r
2088                                         /* As this is called from a co-routine we cannot block directly, but\r
2089                                         return indicating that we need to block. */\r
2090                                         vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );\r
2091                                         portENABLE_INTERRUPTS();\r
2092                                         return errQUEUE_BLOCKED;\r
2093                                 }\r
2094                                 else\r
2095                                 {\r
2096                                         portENABLE_INTERRUPTS();\r
2097                                         return errQUEUE_FULL;\r
2098                                 }\r
2099                         }\r
2100                 }\r
2101                 portENABLE_INTERRUPTS();\r
2102 \r
2103                 portDISABLE_INTERRUPTS();\r
2104                 {\r
2105                         if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
2106                         {\r
2107                                 /* There is room in the queue - copy the data into it. */\r
2108                                 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );\r
2109                                 xReturn = pdPASS;\r
2110 \r
2111                                 /* Were any co-routines waiting for data to become available? */\r
2112                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2113                                 {\r
2114                                         /* In this instance the co-routine could be placed directly\r
2115                                         into the ready list as we are within a critical section.\r
2116                                         Instead the same pending ready list mechanism is used as if\r
2117                                         the event were caused from within an interrupt. */\r
2118                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2119                                         {\r
2120                                                 /* The co-routine waiting has a higher priority so record\r
2121                                                 that a yield might be appropriate. */\r
2122                                                 xReturn = errQUEUE_YIELD;\r
2123                                         }\r
2124                                         else\r
2125                                         {\r
2126                                                 mtCOVERAGE_TEST_MARKER();\r
2127                                         }\r
2128                                 }\r
2129                                 else\r
2130                                 {\r
2131                                         mtCOVERAGE_TEST_MARKER();\r
2132                                 }\r
2133                         }\r
2134                         else\r
2135                         {\r
2136                                 xReturn = errQUEUE_FULL;\r
2137                         }\r
2138                 }\r
2139                 portENABLE_INTERRUPTS();\r
2140 \r
2141                 return xReturn;\r
2142         }\r
2143 \r
2144 #endif /* configUSE_CO_ROUTINES */\r
2145 /*-----------------------------------------------------------*/\r
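/* xQueueCRSend() is normally reached through the crQUEUE_SEND() macro in\r
croutine.h, which handles the errQUEUE_BLOCKED return by yielding the\r
co-routine.  A hypothetical sketch (xCoRoutineQueue is application code):\r
\r
        static void prvProducerCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )\r
        {\r
        // Co-routines share a stack, so variables that must survive a\r
        // blocking point have to be static.\r
        static UBaseType_t uxValueToPost = 0;\r
        static BaseType_t xResult;\r
\r
                crSTART( xHandle );\r
\r
                for( ;; )\r
                {\r
                        // Block for up to 10 ticks if the queue is full.\r
                        crQUEUE_SEND( xHandle, xCoRoutineQueue, &uxValueToPost, 10, &xResult );\r
                        if( xResult == pdPASS )\r
                        {\r
                                uxValueToPost++;\r
                        }\r
                }\r
\r
                crEND();\r
        }\r
*/\r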
2146 \r
2147 #if ( configUSE_CO_ROUTINES == 1 )\r
2148 \r
2149         BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )\r
2150         {\r
2151         BaseType_t xReturn;\r
2152         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2153 \r
2154                 /* If the queue is already empty we may have to block.  A critical section\r
2155                 is required to prevent an interrupt adding something to the queue\r
2156                 between the check to see if the queue is empty and blocking on the queue. */\r
2157                 portDISABLE_INTERRUPTS();\r
2158                 {\r
2159                         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
2160                         {\r
2161                                 /* There are no messages in the queue - do we want to block or just\r
2162                                 leave with nothing? */\r
2163                                 if( xTicksToWait > ( TickType_t ) 0 )\r
2164                                 {\r
2165                                         /* As this is a co-routine we cannot block directly, but return\r
2166                                         indicating that we need to block. */\r
2167                                         vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );\r
2168                                         portENABLE_INTERRUPTS();\r
2169                                         return errQUEUE_BLOCKED;\r
2170                                 }\r
2171                                 else\r
2172                                 {\r
2173                                         portENABLE_INTERRUPTS();\r
2174                                         return errQUEUE_EMPTY; /* errQUEUE_EMPTY and errQUEUE_FULL both equate to zero, but the queue is empty here, not full. */\r
2175                                 }\r
2176                         }\r
2177                         else\r
2178                         {\r
2179                                 mtCOVERAGE_TEST_MARKER();\r
2180                         }\r
2181                 }\r
2182                 portENABLE_INTERRUPTS();\r
2183 \r
2184                 portDISABLE_INTERRUPTS();\r
2185                 {\r
2186                         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
2187                         {\r
2188                                 /* Data is available from the queue. */\r
2189                                 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
2190                                 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )\r
2191                                 {\r
2192                                         pxQueue->u.pcReadFrom = pxQueue->pcHead;\r
2193                                 }\r
2194                                 else\r
2195                                 {\r
2196                                         mtCOVERAGE_TEST_MARKER();\r
2197                                 }\r
2198                                 --( pxQueue->uxMessagesWaiting );\r
2199                                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );\r
2200 \r
2201                                 xReturn = pdPASS;\r
2202 \r
2203                                 /* Were any co-routines waiting for space to become available? */\r
2204                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
2205                                 {\r
2206                                         /* In this instance the co-routine could be placed directly\r
2207                                         into the ready list as we are within a critical section.\r
2208                                         Instead the same pending ready list mechanism is used as if\r
2209                                         the event were caused from within an interrupt. */\r
2210                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
2211                                         {\r
2212                                                 xReturn = errQUEUE_YIELD;\r
2213                                         }\r
2214                                         else\r
2215                                         {\r
2216                                                 mtCOVERAGE_TEST_MARKER();\r
2217                                         }\r
2218                                 }\r
2219                                 else\r
2220                                 {\r
2221                                         mtCOVERAGE_TEST_MARKER();\r
2222                                 }\r
2223                         }\r
2224                         else\r
2225                         {\r
2226                                 xReturn = pdFAIL;\r
2227                         }\r
2228                 }\r
2229                 portENABLE_INTERRUPTS();\r
2230 \r
2231                 return xReturn;\r
2232         }\r
2233 \r
2234 #endif /* configUSE_CO_ROUTINES */\r
2235 /*-----------------------------------------------------------*/\r
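/* As with xQueueCRSend(), application code reaches xQueueCRReceive() through\r
a croutine.h macro - crQUEUE_RECEIVE() in this case.  A hypothetical sketch\r
(xCoRoutineQueue again being application code):\r
\r
        static void prvConsumerCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )\r
        {\r
        static UBaseType_t uxReceivedValue;\r
        static BaseType_t xResult;\r
\r
                crSTART( xHandle );\r
\r
                for( ;; )\r
                {\r
                        // Block for up to 10 ticks if nothing is queued.\r
                        crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &uxReceivedValue, 10, &xResult );\r
                        if( xResult == pdPASS )\r
                        {\r
                                // uxReceivedValue now holds the posted data.\r
                        }\r
                }\r
\r
                crEND();\r
        }\r
*/\r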
2236 \r
2237 #if ( configUSE_CO_ROUTINES == 1 )\r
2238 \r
2239         BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )\r
2240         {\r
2241         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2242 \r
2243                 /* Cannot block within an ISR so if there is no space on the queue then\r
2244                 exit without doing anything. */\r
2245                 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
2246                 {\r
2247                         prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );\r
2248 \r
2249                         /* We only want to wake one co-routine per ISR, so check that a\r
2250                         co-routine has not already been woken. */\r
2251                         if( xCoRoutinePreviouslyWoken == pdFALSE )\r
2252                         {\r
2253                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2254                                 {\r
2255                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2256                                         {\r
2257                                                 return pdTRUE;\r
2258                                         }\r
2259                                         else\r
2260                                         {\r
2261                                                 mtCOVERAGE_TEST_MARKER();\r
2262                                         }\r
2263                                 }\r
2264                                 else\r
2265                                 {\r
2266                                         mtCOVERAGE_TEST_MARKER();\r
2267                                 }\r
2268                         }\r
2269                         else\r
2270                         {\r
2271                                 mtCOVERAGE_TEST_MARKER();\r
2272                         }\r
2273                 }\r
2274                 else\r
2275                 {\r
2276                         mtCOVERAGE_TEST_MARKER();\r
2277                 }\r
2278 \r
2279                 return xCoRoutinePreviouslyWoken;\r
2280         }\r
2281 \r
2282 #endif /* configUSE_CO_ROUTINES */\r
2283 /*-----------------------------------------------------------*/\r
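/* Sketch of the intended calling pattern, via the crQUEUE_SEND_FROM_ISR()\r
macro (the UART helper functions and xCommsRxQueue are hypothetical):\r
\r
        void vRxISR( void )\r
        {\r
        char cRxedChar;\r
        BaseType_t xCRWokenByPost = pdFALSE;\r
\r
                while( xUARTReceiveByteAvailable() == pdTRUE )\r
                {\r
                        cRxedChar = cUARTReadByte();\r
\r
                        // Threading the flag through each call ensures at\r
                        // most one co-routine is woken per interrupt.\r
                        xCRWokenByPost = crQUEUE_SEND_FROM_ISR( xCommsRxQueue, &cRxedChar, xCRWokenByPost );\r
                }\r
        }\r
*/\r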
2284 \r
2285 #if ( configUSE_CO_ROUTINES == 1 )\r
2286 \r
2287         BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )\r
2288         {\r
2289         BaseType_t xReturn;\r
2290         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2291 \r
2292                 /* We cannot block from an ISR, so check there is data available. If\r
2293                 not then just leave without doing anything. */\r
2294                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
2295                 {\r
2296                         /* Copy the data from the queue. */\r
2297                         pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
2298                         if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )\r
2299                         {\r
2300                                 pxQueue->u.pcReadFrom = pxQueue->pcHead;\r
2301                         }\r
2302                         else\r
2303                         {\r
2304                                 mtCOVERAGE_TEST_MARKER();\r
2305                         }\r
2306                         --( pxQueue->uxMessagesWaiting );\r
2307                         ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );\r
2308 \r
2309                         if( ( *pxCoRoutineWoken ) == pdFALSE )\r
2310                         {\r
2311                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
2312                                 {\r
2313                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
2314                                         {\r
2315                                                 *pxCoRoutineWoken = pdTRUE;\r
2316                                         }\r
2317                                         else\r
2318                                         {\r
2319                                                 mtCOVERAGE_TEST_MARKER();\r
2320                                         }\r
2321                                 }\r
2322                                 else\r
2323                                 {\r
2324                                         mtCOVERAGE_TEST_MARKER();\r
2325                                 }\r
2326                         }\r
2327                         else\r
2328                         {\r
2329                                 mtCOVERAGE_TEST_MARKER();\r
2330                         }\r
2331 \r
2332                         xReturn = pdPASS;\r
2333                 }\r
2334                 else\r
2335                 {\r
2336                         xReturn = pdFAIL;\r
2337                 }\r
2338 \r
2339                 return xReturn;\r
2340         }\r
2341 \r
2342 #endif /* configUSE_CO_ROUTINES */\r
2343 /*-----------------------------------------------------------*/\r
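/* The mirror-image sketch for the receive direction, via the\r
crQUEUE_RECEIVE_FROM_ISR() macro (again with hypothetical UART helpers):\r
\r
        void vTxISR( void )\r
        {\r
        char cCharToTx;\r
        BaseType_t xCRWokenByPost = pdFALSE;\r
\r
                // Keep feeding the UART while co-routines have queued data.\r
                while( xUARTTxRegisterEmpty() == pdTRUE )\r
                {\r
                        if( crQUEUE_RECEIVE_FROM_ISR( xCommsTxQueue, &cCharToTx, &xCRWokenByPost ) == pdPASS )\r
                        {\r
                                vUARTWriteByte( cCharToTx );\r
                        }\r
                        else\r
                        {\r
                                break;\r
                        }\r
                }\r
        }\r
*/\r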
2344 \r
2345 #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
2346 \r
2347         void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
2348         {\r
2349         UBaseType_t ux;\r
2350 \r
2351                 /* See if there is an empty space in the registry.  A NULL name denotes\r
2352                 a free slot. */\r
2353                 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )\r
2354                 {\r
2355                         if( xQueueRegistry[ ux ].pcQueueName == NULL )\r
2356                         {\r
2357                                 /* Store the information on this queue. */\r
2358                                 xQueueRegistry[ ux ].pcQueueName = pcQueueName;\r
2359                                 xQueueRegistry[ ux ].xHandle = xQueue;\r
2360 \r
2361                                 traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );\r
2362                                 break;\r
2363                         }\r
2364                         else\r
2365                         {\r
2366                                 mtCOVERAGE_TEST_MARKER();\r
2367                         }\r
2368                 }\r
2369         }\r
2370 \r
2371 #endif /* configQUEUE_REGISTRY_SIZE */\r
2372 /*-----------------------------------------------------------*/\r
2373 \r
2374 #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
2375 \r
2376         void vQueueUnregisterQueue( QueueHandle_t xQueue )\r
2377         {\r
2378         UBaseType_t ux;\r
2379 \r
2380                 /* See if the handle of the queue being unregistered is actually in the\r
2381                 registry. */\r
2382                 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )\r
2383                 {\r
2384                         if( xQueueRegistry[ ux ].xHandle == xQueue )\r
2385                         {\r
2386                                 /* Set the name to NULL to show that this slot is free again. */\r
2387                                 xQueueRegistry[ ux ].pcQueueName = NULL;\r
2388                                 break;\r
2389                         }\r
2390                         else\r
2391                         {\r
2392                                 mtCOVERAGE_TEST_MARKER();\r
2393                         }\r
2394                 }\r
2395 \r
2396         } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2397 \r
2398 #endif /* configQUEUE_REGISTRY_SIZE */\r
2399 /*-----------------------------------------------------------*/\r
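/* Registry usage sketch (the handle and name are hypothetical application\r
code).  Note the registry stores the name pointer rather than copying the\r
string, so the string must remain valid for as long as the queue is\r
registered:\r
\r
        QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );\r
\r
        // The name shows up in kernel-aware debuggers and trace tools.\r
        vQueueAddToRegistry( xQueue, "AppRxQueue" );\r
\r
        // Free the registry slot before deleting the queue.\r
        vQueueUnregisterQueue( xQueue );\r
        vQueueDelete( xQueue );\r
*/\r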
2400 \r
2401 #if ( configUSE_TIMERS == 1 )\r
2402 \r
2403         void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )\r
2404         {\r
2405         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2406 \r
2407                 /* This function should not be called by application code hence the\r
2408                 'Restricted' in its name.  It is not part of the public API.  It is\r
2409                 designed for use by kernel code, and has special calling requirements.\r
2410                 It can result in vListInsert() being called on a list that can only\r
2411                 possibly ever have one item in it, so the insertion will be fast, but even\r
2412                 so it should be called with the scheduler locked and not from a critical\r
2413                 section. */\r
2414 \r
2415                 /* Only do anything if there are no messages in the queue.  This function\r
2416                 will not actually cause the task to block, just place it on a blocked\r
2417                 list.  It will not block until the scheduler is unlocked - at which\r
2418                 time a yield will be performed.  If an item is added to the queue while\r
2419                 the queue is locked, and the calling task blocks on the queue, then the\r
2420                 calling task will be immediately unblocked when the queue is unlocked. */\r
2421                 prvLockQueue( pxQueue );\r
2422                 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )\r
2423                 {\r
2424                         /* There is nothing in the queue, block for the specified period. */\r
2425                         vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );\r
2426                 }\r
2427                 else\r
2428                 {\r
2429                         mtCOVERAGE_TEST_MARKER();\r
2430                 }\r
2431                 prvUnlockQueue( pxQueue );\r
2432         }\r
2433 \r
2434 #endif /* configUSE_TIMERS */\r
2435 /*-----------------------------------------------------------*/\r
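/* A sketch of the special calling sequence referred to above, as driven by\r
kernel code such as the timer service task (simplified, not verbatim):\r
\r
        vTaskSuspendAll();\r
        {\r
                // Put this task on the queue's event list without yielding\r
                // yet - the scheduler is suspended.\r
                vQueueWaitForMessageRestricted( xTimerQueue, xTicksToWait, pdFALSE );\r
        }\r
        if( xTaskResumeAll() == pdFALSE )\r
        {\r
                // Resuming the scheduler did not cause a switch, so yield\r
                // explicitly to let the block take effect.\r
                portYIELD_WITHIN_API();\r
        }\r
*/\r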
2436 \r
2437 #if ( configUSE_QUEUE_SETS == 1 )\r
2438 \r
2439         QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )\r
2440         {\r
2441         QueueSetHandle_t pxQueue;\r
2442 \r
2443                 pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );\r
2444 \r
2445                 return pxQueue;\r
2446         }\r
2447 \r
2448 #endif /* configUSE_QUEUE_SETS */\r
2449 /*-----------------------------------------------------------*/\r
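/* The event queue created here has to be able to hold one member handle for\r
every item that could be pending across all members of the set.  An\r
illustrative sizing (hypothetical numbers):\r
\r
        // A length-5 queue plus two binary semaphores => 5 + 1 + 1 slots.\r
        QueueSetHandle_t xQueueSet = xQueueCreateSet( 5 + 1 + 1 );\r
*/\r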
2450 \r
2451 #if ( configUSE_QUEUE_SETS == 1 )\r
2452 \r
2453         BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )\r
2454         {\r
2455         BaseType_t xReturn;\r
2456 \r
2457                 taskENTER_CRITICAL();\r
2458                 {\r
2459                         if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )\r
2460                         {\r
2461                                 /* Cannot add a queue/semaphore to more than one queue set. */\r
2462                                 xReturn = pdFAIL;\r
2463                         }\r
2464                         else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )\r
2465                         {\r
2466                                 /* Cannot add a queue/semaphore to a queue set if there are already\r
2467                                 items in the queue/semaphore. */\r
2468                                 xReturn = pdFAIL;\r
2469                         }\r
2470                         else\r
2471                         {\r
2472                                 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;\r
2473                                 xReturn = pdPASS;\r
2474                         }\r
2475                 }\r
2476                 taskEXIT_CRITICAL();\r
2477 \r
2478                 return xReturn;\r
2479         }\r
2480 \r
2481 #endif /* configUSE_QUEUE_SETS */\r
2482 /*-----------------------------------------------------------*/\r
2483 \r
2484 #if ( configUSE_QUEUE_SETS == 1 )\r
2485 \r
2486         BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )\r
2487         {\r
2488         BaseType_t xReturn;\r
2489         Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;\r
2490 \r
2491                 if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )\r
2492                 {\r
2493                         /* The queue was not a member of the set. */\r
2494                         xReturn = pdFAIL;\r
2495                 }\r
2496                 else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )\r
2497                 {\r
2498                         /* It is dangerous to remove a queue from a set when the queue is\r
2499                         not empty because the queue set will still hold pending events for\r
2500                         the queue. */\r
2501                         xReturn = pdFAIL;\r
2502                 }\r
2503                 else\r
2504                 {\r
2505                         taskENTER_CRITICAL();\r
2506                         {\r
2507                                 /* The queue is no longer contained in the set. */\r
2508                                 pxQueueOrSemaphore->pxQueueSetContainer = NULL;\r
2509                         }\r
2510                         taskEXIT_CRITICAL();\r
2511                         xReturn = pdPASS;\r
2512                 }\r
2513 \r
2514                 return xReturn;\r
2515         } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */\r
2516 \r
2517 #endif /* configUSE_QUEUE_SETS */\r
2518 /*-----------------------------------------------------------*/\r
2519 \r
2520 #if ( configUSE_QUEUE_SETS == 1 )\r
2521 \r
2522         QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )\r
2523         {\r
2524         QueueSetMemberHandle_t xReturn = NULL;\r
2525 \r
2526                 ( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */\r
2527                 return xReturn;\r
2528         }\r
2529 \r
2530 #endif /* configUSE_QUEUE_SETS */\r
2531 /*-----------------------------------------------------------*/\r
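/* End-to-end queue set sketch (the member handles, the set and the received\r
value are hypothetical application code):\r
\r
        static void prvEventTask( void *pvParameters )\r
        {\r
        QueueSetMemberHandle_t xActivated;\r
        uint32_t ulValue;\r
\r
                // Members must be empty when added, so add them before use.\r
                ( void ) xQueueAddToSet( xQueue1, xQueueSet );\r
                ( void ) xQueueAddToSet( xSemaphore1, xQueueSet );\r
\r
                for( ;; )\r
                {\r
                        // Blocks until one of the members contains data.\r
                        xActivated = xQueueSelectFromSet( xQueueSet, portMAX_DELAY );\r
\r
                        if( xActivated == ( QueueSetMemberHandle_t ) xQueue1 )\r
                        {\r
                                // Guaranteed not to block - the set reported\r
                                // data as present.\r
                                ( void ) xQueueReceive( xQueue1, &ulValue, 0 );\r
                        }\r
                        else if( xActivated == ( QueueSetMemberHandle_t ) xSemaphore1 )\r
                        {\r
                                ( void ) xSemaphoreTake( xSemaphore1, 0 );\r
                        }\r
                }\r
        }\r
*/\r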
2532 \r
2533 #if ( configUSE_QUEUE_SETS == 1 )\r
2534 \r
2535         QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )\r
2536         {\r
2537         QueueSetMemberHandle_t xReturn = NULL;\r
2538 \r
2539                 ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */\r
2540                 return xReturn;\r
2541         }\r
2542 \r
2543 #endif /* configUSE_QUEUE_SETS */\r
2544 /*-----------------------------------------------------------*/\r
2545 \r
2546 #if ( configUSE_QUEUE_SETS == 1 )\r
2547 \r
2548         static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )\r
2549         {\r
2550         Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;\r
2551         BaseType_t xReturn = pdFALSE;\r
2552 \r
2553                 /* This function must be called from a critical section. */\r
2554 \r
2555                 configASSERT( pxQueueSetContainer );\r
2556                 configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );\r
2557 \r
2558                 if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )\r
2559                 {\r
2560                         traceQUEUE_SEND( pxQueueSetContainer );\r
2561 \r
2562                         /* The data copied is the handle of the queue that contains data. */\r
2563                         xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );\r
2564 \r
2565                         if( pxQueueSetContainer->xTxLock == queueUNLOCKED )\r
2566                         {\r
2567                                 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )\r
2568                                 {\r
2569                                         if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )\r
2570                                         {\r
2571                                                 /* The task waiting has a higher priority. */\r
2572                                                 xReturn = pdTRUE;\r
2573                                         }\r
2574                                         else\r
2575                                         {\r
2576                                                 mtCOVERAGE_TEST_MARKER();\r
2577                                         }\r
2578                                 }\r
2579                                 else\r
2580                                 {\r
2581                                         mtCOVERAGE_TEST_MARKER();\r
2582                                 }\r
2583                         }\r
2584                         else\r
2585                         {\r
2586                                 ( pxQueueSetContainer->xTxLock )++;\r
2587                         }\r
2588                 }\r
2589                 else\r
2590                 {\r
2591                         mtCOVERAGE_TEST_MARKER();\r
2592                 }\r
2593 \r
2594                 return xReturn;\r
2595         }\r
2596 \r
2597 #endif /* configUSE_QUEUE_SETS */\r
2598 \r
2599 \r
2600 \r
2601 \r
2602 \r
2603 \r
2604 \r
2605 \r
2606 \r
2607 \r
2608 \r
2609 \r