/*
 * FreeRTOS Kernel V10.0.1
 * Copyright (C) 2017 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * http://www.FreeRTOS.org
 * http://aws.amazon.com/freertos
 *
 * 1 tab == 4 spaces!
 */

#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */


/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED                    ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED           ( ( int8_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex, pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder                    pcTail
#define uxQueueType                      pcHead
#define queueQUEUE_IS_MUTEX              NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME       ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
    int8_t *pcHead;                 /*< Points to the beginning of the queue storage area. */
    int8_t *pcTail;                 /*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items, this is used as a marker. */
    int8_t *pcWriteTo;              /*< Points to the next free place in the storage area. */

    union                           /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
    {
        int8_t *pcReadFrom;         /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
        UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
    } u;

    List_t xTasksWaitingToSend;     /*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;  /*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
    UBaseType_t uxLength;           /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;         /*< The size of each item that the queue will hold. */

    volatile int8_t cRxLock;        /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile int8_t cTxLock;        /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
        uint8_t ucStaticallyAllocated;  /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition *pxQueueSetContainer;
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to the
    new QueueRegistryItem_t name below to enable the use of older kernel aware
    debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
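
/* Illustrative sketch, not part of the kernel source: a queue can be made
visible to a kernel aware debugger by adding it to the registry after it has
been created.  The function and queue names below are hypothetical, and the
block is guarded with #if 0 so it is never built. */
#if 0
    static void vRegistryExample( void )
    {
    QueueHandle_t xExampleQueue = xQueueCreate( 10, sizeof( uint32_t ) );

        if( xExampleQueue != NULL )
        {
            /* Only the pointer to the name is stored, so the string must
            remain valid for the lifetime of the queue. */
            vQueueAddToRegistry( xExampleQueue, "ExampleQueue" );
        }
    }
#endif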

/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
    /*
     * Checks to see if a queue is a member of a queue set, and if so, notifies
     * the queue set that the queue contains data.
     */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*
 * Called after a Queue_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;

/*
 * Mutexes are a special type of queue.  When a mutex is created, first the
 * queue is created, then prvInitialiseMutex() is called to configure the queue
 * as a mutex.
 */
#if( configUSE_MUTEXES == 1 )
    static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
#endif

#if( configUSE_MUTEXES == 1 )
    /*
     * If a task waiting for a mutex causes the mutex holder to inherit a
     * priority, but the waiting task times out, then the holder should
     * disinherit the priority - but only down to the highest priority of any
     * other tasks that are waiting for the same mutex.  This function returns
     * that priority.
     */
    static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                             \
    taskENTER_CRITICAL();                                   \
    {                                                       \
        if( ( pxQueue )->cRxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
        if( ( pxQueue )->cTxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
    }                                                       \
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
        pxQueue->cRxLock = queueUNLOCKED;
        pxQueue->cTxLock = queueUNLOCKED;

        if( xNewQueue == pdFALSE )
        {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                {
                    queueYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Ensure the event queues start in the correct state. */
            vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
/*-----------------------------------------------------------*/
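
/* Illustrative sketch, not part of the kernel source: application code reaches
xQueueGenericReset() through the xQueueReset() macro in queue.h, which expands
to xQueueGenericReset( xQueue, pdFALSE ).  The names below are hypothetical and
the block is guarded with #if 0 so it is never built. */
#if 0
    static void vResetExample( QueueHandle_t xExampleQueue )
    {
        /* Discard anything still held in the queue.  A task blocked waiting
        to send will be unblocked because the queue has space again. */
        ( void ) xQueueReset( xExampleQueue );
    }
#endif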

#if( configSUPPORT_STATIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;

        configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

        /* The StaticQueue_t structure and the queue storage area must be
        supplied. */
        configASSERT( pxStaticQueue != NULL );

        /* A queue storage area should be provided if the item size is not 0, and
        should not be provided if the item size is 0. */
        configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) );
        configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) );

        #if( configASSERT_DEFINED == 1 )
        {
            /* Sanity check that the size of the structure used to declare a
            variable of type StaticQueue_t or StaticSemaphore_t equals the size of
            the real queue and semaphore structures. */
            volatile size_t xSize = sizeof( StaticQueue_t );
            configASSERT( xSize == sizeof( Queue_t ) );
        }
        #endif /* configASSERT_DEFINED */

        /* The address of a statically allocated queue was passed in, use it.
        The address of a statically allocated storage area was also passed in
        but is already set. */
        pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */

        if( pxNewQueue != NULL )
        {
            #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
            {
                /* Queues can be allocated either statically or dynamically, so
                note this queue was allocated statically in case the queue is
                later deleted. */
                pxNewQueue->ucStaticallyAllocated = pdTRUE;
            }
            #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            traceQUEUE_CREATE_FAILED( ucQueueType );
        }

        return pxNewQueue;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
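
/* Illustrative sketch, not part of the kernel source: applications normally
reach xQueueGenericCreateStatic() through the xQueueCreateStatic() macro in
queue.h.  Both buffers must remain valid for the lifetime of the queue, hence
the static storage.  All names are hypothetical and the block is guarded with
#if 0 so it is never built. */
#if 0
    #define exampleQUEUE_LENGTH    ( 10 )

    static void vStaticCreateExample( void )
    {
    static uint8_t ucQueueStorage[ exampleQUEUE_LENGTH * sizeof( uint32_t ) ];
    static StaticQueue_t xQueueBuffer;
    QueueHandle_t xExampleQueue;

        xExampleQueue = xQueueCreateStatic( exampleQUEUE_LENGTH, sizeof( uint32_t ), ucQueueStorage, &xQueueBuffer );

        /* No dynamic allocation is performed, so creation cannot fail if the
        parameters are valid. */
        configASSERT( xExampleQueue );
    }
#endif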

#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;
    size_t xQueueSizeInBytes;
    uint8_t *pucQueueStorage;

        configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

        if( uxItemSize == ( UBaseType_t ) 0 )
        {
            /* There is not going to be a queue storage area. */
            xQueueSizeInBytes = ( size_t ) 0;
        }
        else
        {
            /* Allocate enough space to hold the maximum number of items that
            can be in the queue at any time. */
            xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
        }

        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

        if( pxNewQueue != NULL )
        {
            /* Jump past the queue structure to find the location of the queue
            storage area. */
            pucQueueStorage = ( ( uint8_t * ) pxNewQueue ) + sizeof( Queue_t );

            #if( configSUPPORT_STATIC_ALLOCATION == 1 )
            {
                /* Queues can be created either statically or dynamically, so
                note this queue was created dynamically in case it is later
                deleted. */
                pxNewQueue->ucStaticallyAllocated = pdFALSE;
            }
            #endif /* configSUPPORT_STATIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            traceQUEUE_CREATE_FAILED( ucQueueType );
        }

        return pxNewQueue;
    }

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
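
/* Illustrative sketch, not part of the kernel source: the usual entry point is
the xQueueCreate() macro in queue.h, which calls xQueueGenericCreate() with
queueQUEUE_TYPE_BASE.  As allocated above, the queue structure and its storage
area come from a single pvPortMalloc() block.  Names are hypothetical and the
block is guarded with #if 0 so it is never built. */
#if 0
    static void vDynamicCreateExample( void )
    {
    /* A queue that holds up to 10 uint32_t values, queued by copy. */
    QueueHandle_t xExampleQueue = xQueueCreate( 10, sizeof( uint32_t ) );

        if( xExampleQueue == NULL )
        {
            /* The heap had insufficient free space for the queue. */
        }
    }
#endif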

static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
{
    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* No RAM was allocated for the queue storage area, but pcHead cannot
        be set to NULL because NULL is used as a key to say the queue is used as
        a mutex.  Therefore just set pcHead to point to the queue as a benign
        value that is known to be within the memory map. */
        pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
    }
    else
    {
        /* Set the head to the start of the queue storage area. */
        pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
    }

    /* Initialise the queue members as described where the queue type is
    defined. */
    pxNewQueue->uxLength = uxQueueLength;
    pxNewQueue->uxItemSize = uxItemSize;
    ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

    #if ( configUSE_TRACE_FACILITY == 1 )
    {
        pxNewQueue->ucQueueType = ucQueueType;
    }
    #endif /* configUSE_TRACE_FACILITY */

    #if( configUSE_QUEUE_SETS == 1 )
    {
        pxNewQueue->pxQueueSetContainer = NULL;
    }
    #endif /* configUSE_QUEUE_SETS */

    traceQUEUE_CREATE( pxNewQueue );
}
/*-----------------------------------------------------------*/

#if( configUSE_MUTEXES == 1 )

    static void prvInitialiseMutex( Queue_t *pxNewQueue )
    {
        if( pxNewQueue != NULL )
        {
            /* The queue create function will set all the queue structure members
            correctly for a generic queue, but this function is creating a
            mutex.  Overwrite those members that need to be set differently -
            in particular the information required for priority inheritance. */
            pxNewQueue->pxMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* In case this is a recursive mutex. */
            pxNewQueue->u.uxRecursiveCallCount = 0;

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/

#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        pxNewQueue = ( Queue_t * ) xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
        prvInitialiseMutex( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
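
/* Illustrative sketch, not part of the kernel source: applications create and
use mutexes through the semphr.h wrappers rather than calling
xQueueCreateMutex() directly.  Assumes semphr.h is included; names are
hypothetical and the block is guarded with #if 0 so it is never built. */
#if 0
    static void vMutexExample( void )
    {
    SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();

        if( xMutex != NULL )
        {
            /* Block for up to 10ms waiting for the mutex.  Priority
            inheritance is applied to the holder while this task waits. */
            if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 10 ) ) == pdPASS )
            {
                /* ... access the resource protected by the mutex ... */
                ( void ) xSemaphoreGive( xMutex );
            }
        }
    }
#endif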

#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue )
    {
    Queue_t *pxNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        pxNewQueue = ( Queue_t * ) xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
        prvInitialiseMutex( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
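
/* Illustrative sketch, not part of the kernel source: as noted above, the
holder query is reliable for asking "is the calling task the holder?".
Assumes semphr.h is included and INCLUDE_xTaskGetCurrentTaskHandle is 1; names
are hypothetical and the block is guarded with #if 0 so it is never built. */
#if 0
    static void vHolderCheckExample( SemaphoreHandle_t xMutex )
    {
        if( xSemaphoreGetMutexHolder( xMutex ) == xTaskGetCurrentTaskHandle() )
        {
            /* The calling task holds the mutex, so giving it back is safe. */
            ( void ) xSemaphoreGive( xMutex );
        }
    }
#endif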

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        configASSERT( xSemaphore );

        /* Mutexes cannot be used in interrupt service routines, so the mutex
        holder should not change in an ISR, and therefore a critical section is
        not required here. */
        if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
        {
            pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
        }
        else
        {
            pxReturn = NULL;
        }

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then pxMutexHolder will not
        change outside of this task.  If this task does not hold the mutex then
        pxMutexHolder can never coincidentally equal the task's handle, and as
        this is the only condition we are interested in it does not matter if
        pxMutexHolder is accessed simultaneously by another task.  Therefore no
        mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
            the task handle, therefore no underflow check is required.  Also,
            uxRecursiveCallCount is only modified by the mutex holder, and as
            there can only be one, no mutual exclusion is required to modify the
            uxRecursiveCallCount member. */
            ( pxMutex->u.uxRecursiveCallCount )--;

            /* Has the recursive call count unwound to 0? */
            if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
            holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
        xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
        {
            ( pxMutex->u.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );

            /* pdPASS will only be returned if the mutex was successfully
            obtained.  The calling task may have entered the Blocked state
            before reaching here. */
            if( xReturn != pdFAIL )
            {
                ( pxMutex->u.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
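
/* Illustrative sketch, not part of the kernel source: recursive mutexes are
used through the semphr.h wrappers.  Each successful take must be balanced by
a give before the mutex becomes available to other tasks - exactly the
uxRecursiveCallCount behaviour implemented above.  Assumes semphr.h is
included; names are hypothetical and the block is guarded with #if 0 so it is
never built. */
#if 0
    static void vRecursiveMutexExample( SemaphoreHandle_t xRecursiveMutex )
    {
        if( xSemaphoreTakeRecursive( xRecursiveMutex, portMAX_DELAY ) == pdPASS )
        {
            /* Taking again from the holding task just increments the count. */
            ( void ) xSemaphoreTakeRecursive( xRecursiveMutex, 0 );

            /* Two takes require two gives before other tasks can obtain it. */
            ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
            ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
        }
    }
#endif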

#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/

#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
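
/* Illustrative sketch, not part of the kernel source: a counting semaphore is
a queue of zero-sized items, so its count is simply uxMessagesWaiting, as set
above.  Applications use the semphr.h wrapper.  Assumes semphr.h is included;
names are hypothetical and the block is guarded with #if 0 so it is never
built. */
#if 0
    static void vCountingSemaphoreExample( void )
    {
    /* Counts up to 5 and starts empty - e.g. to count events latched by an
    interrupt for deferred processing in a task. */
    SemaphoreHandle_t xCountingSemaphore = xSemaphoreCreateCounting( 5, 0 );

        if( xCountingSemaphore != NULL )
        {
            /* Block until at least one event is available, then consume it. */
            if( xSemaphoreTake( xCountingSemaphore, portMAX_DELAY ) == pdPASS )
            {
                /* ... process one event ... */
            }
        }
    }
#endif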

BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
            highest priority task wanting to access the queue.  If the head item
            in the queue is to be overwritten then it does not matter if the
            queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                        {
                            /* Do not notify the queue set as an existing item
                            was overwritten in the queue so the number of items
                            in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock. A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    }
}
/*-----------------------------------------------------------*/
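
/* Illustrative sketch, not part of the kernel source: tasks normally reach
xQueueGenericSend() through the xQueueSend(), xQueueSendToBack(),
xQueueSendToFront() and xQueueOverwrite() macros in queue.h.  Names are
hypothetical and the block is guarded with #if 0 so it is never built. */
#if 0
    static void vSendExample( QueueHandle_t xExampleQueue )
    {
    uint32_t ulValueToSend = 42UL;

        /* Copy ulValueToSend into the queue, blocking for up to 100ms if the
        queue is full.  errQUEUE_FULL is returned if the timeout expires. */
        if( xQueueSend( xExampleQueue, &ulValueToSend, pdMS_TO_TICKS( 100 ) ) != pdPASS )
        {
            /* The item could not be queued before the timeout expired. */
        }
    }
#endif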

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
    in the queue.  Also don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
            semaphore or mutex.  That means prvCopyDataToQueue() cannot result
            in a task disinheriting a priority and prvCopyDataToQueue() can be
            called here even though the disinherit function does not check if
            the scheduler is suspended before accessing the ready lists. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
1060                                                 mtCOVERAGE_TEST_MARKER();\r
1061                                         }\r
1062                                 }\r
1063                                 #endif /* configUSE_QUEUE_SETS */\r
1064                         }\r
1065                         else\r
1066                         {\r
1067                                 /* Increment the lock count so the task that unlocks the queue\r
1068                                 knows that data was posted while it was locked. */\r
1069                                 pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );\r
1070                         }\r
1071 \r
1072                         xReturn = pdPASS;\r
1073                 }\r
1074                 else\r
1075                 {\r
1076                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1077                         xReturn = errQUEUE_FULL;\r
1078                 }\r
1079         }\r
1080         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1081 \r
1082         return xReturn;\r
1083 }\r
1084 /*-----------------------------------------------------------*/\r
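\r
/* Illustrative usage sketch - not part of the kernel.  A typical ISR posts\r
to a queue with the xQueueSendFromISR() macro (which calls\r
xQueueGenericSendFromISR()), then requests a context switch on exit if the\r
post woke a higher priority task.  The names vExampleISR, xExampleQueue and\r
ulReadValueFromPeripheral() are hypothetical.\r
\r
        void vExampleISR( void )\r
        {\r
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
        uint32_t ulValue = ulReadValueFromPeripheral();\r
\r
                // Cannot block in an ISR - returns errQUEUE_FULL if there is no space.\r
                ( void ) xQueueSendFromISR( xExampleQueue, &ulValue, &xHigherPriorityTaskWoken );\r
\r
                // Port specific macro that performs the context switch, if required.\r
                portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
        }\r
*/\r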
1085 \r
1086 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )\r
1087 {\r
1088 BaseType_t xReturn;\r
1089 UBaseType_t uxSavedInterruptStatus;\r
1090 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1091 \r
1092         /* Similar to xQueueGenericSendFromISR() but used with semaphores where the\r
1093         item size is 0.  Don't directly wake a task that was blocked on a queue\r
1094         read; instead return a flag to say whether a context switch is required or\r
1095         not (i.e. has a task with a higher priority than us been woken by this\r
1096         post). */\r
1097 \r
1098         configASSERT( pxQueue );\r
1099 \r
1100         /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()\r
1101         if the item size is not 0. */\r
1102         configASSERT( pxQueue->uxItemSize == 0 );\r
1103 \r
1104         /* Normally a mutex would not be given from an interrupt, especially if\r
1105         there is a mutex holder, as priority inheritance makes no sense for an\r
1106         interrupt, only for tasks. */\r
1107         configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );\r
1108 \r
1109         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1110         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1111         above the maximum system call priority are kept permanently enabled, even\r
1112         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1113         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1114         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1115         failure if a FreeRTOS API function is called from an interrupt that has been\r
1116         assigned a priority above the configured maximum system call priority.\r
1117         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1118         that have been assigned a priority at or (logically) below the maximum\r
1119         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1120         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1121         More information (albeit Cortex-M specific) is provided on the following\r
1122         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1123         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1124 \r
1125         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1126         {\r
1127                 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
1128 \r
1129                 /* When the queue is used to implement a semaphore no data is ever\r
1130                 moved through the queue but it is still valid to see if the queue 'has\r
1131                 space'. */\r
1132                 if( uxMessagesWaiting < pxQueue->uxLength )\r
1133                 {\r
1134                         const int8_t cTxLock = pxQueue->cTxLock;\r
1135 \r
1136                         traceQUEUE_SEND_FROM_ISR( pxQueue );\r
1137 \r
1138                         /* A task can only have an inherited priority if it is a mutex\r
1139                         holder - and if there is a mutex holder then the mutex cannot be\r
1140                         given from an ISR.  As this is the ISR version of the function it\r
1141                         can be assumed there is no mutex holder and no need to determine if\r
1142                         priority disinheritance is needed.  Simply increase the count of\r
1143                         messages (semaphores) available. */\r
1144                         pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;\r
1145 \r
1146                         /* The event list is not altered if the queue is locked.  This will\r
1147                         be done when the queue is unlocked later. */\r
1148                         if( cTxLock == queueUNLOCKED )\r
1149                         {\r
1150                                 #if ( configUSE_QUEUE_SETS == 1 )\r
1151                                 {\r
1152                                         if( pxQueue->pxQueueSetContainer != NULL )\r
1153                                         {\r
1154                                                 if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )\r
1155                                                 {\r
1156                                                         /* The semaphore is a member of a queue set, and\r
1157                                                         posting to the queue set caused a higher priority\r
1158                                                         task to unblock.  A context switch is required. */\r
1159                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1160                                                         {\r
1161                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1162                                                         }\r
1163                                                         else\r
1164                                                         {\r
1165                                                                 mtCOVERAGE_TEST_MARKER();\r
1166                                                         }\r
1167                                                 }\r
1168                                                 else\r
1169                                                 {\r
1170                                                         mtCOVERAGE_TEST_MARKER();\r
1171                                                 }\r
1172                                         }\r
1173                                         else\r
1174                                         {\r
1175                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1176                                                 {\r
1177                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1178                                                         {\r
1179                                                                 /* The task waiting has a higher priority so\r
1180                                                                 record that a context switch is required. */\r
1181                                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1182                                                                 {\r
1183                                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1184                                                                 }\r
1185                                                                 else\r
1186                                                                 {\r
1187                                                                         mtCOVERAGE_TEST_MARKER();\r
1188                                                                 }\r
1189                                                         }\r
1190                                                         else\r
1191                                                         {\r
1192                                                                 mtCOVERAGE_TEST_MARKER();\r
1193                                                         }\r
1194                                                 }\r
1195                                                 else\r
1196                                                 {\r
1197                                                         mtCOVERAGE_TEST_MARKER();\r
1198                                                 }\r
1199                                         }\r
1200                                 }\r
1201                                 #else /* configUSE_QUEUE_SETS */\r
1202                                 {\r
1203                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1204                                         {\r
1205                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1206                                                 {\r
1207                                                         /* The task waiting has a higher priority so record that a\r
1208                                                         context switch is required. */\r
1209                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1210                                                         {\r
1211                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1212                                                         }\r
1213                                                         else\r
1214                                                         {\r
1215                                                                 mtCOVERAGE_TEST_MARKER();\r
1216                                                         }\r
1217                                                 }\r
1218                                                 else\r
1219                                                 {\r
1220                                                         mtCOVERAGE_TEST_MARKER();\r
1221                                                 }\r
1222                                         }\r
1223                                         else\r
1224                                         {\r
1225                                                 mtCOVERAGE_TEST_MARKER();\r
1226                                         }\r
1227                                 }\r
1228                                 #endif /* configUSE_QUEUE_SETS */\r
1229                         }\r
1230                         else\r
1231                         {\r
1232                                 /* Increment the lock count so the task that unlocks the queue\r
1233                                 knows that data was posted while it was locked. */\r
1234                                 pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );\r
1235                         }\r
1236 \r
1237                         xReturn = pdPASS;\r
1238                 }\r
1239                 else\r
1240                 {\r
1241                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1242                         xReturn = errQUEUE_FULL;\r
1243                 }\r
1244         }\r
1245         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1246 \r
1247         return xReturn;\r
1248 }\r
1249 /*-----------------------------------------------------------*/\r
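\r
/* Illustrative usage sketch - not part of the kernel.  Giving a semaphore\r
from an ISR: xSemaphoreGiveFromISR() in semphr.h maps directly onto\r
xQueueGiveFromISR().  The names vTimerISR and xExampleSemaphore are\r
hypothetical.\r
\r
        void vTimerISR( void )\r
        {\r
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
\r
                ( void ) xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );\r
                portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
        }\r
*/\r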
1250 \r
1251 BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )\r
1252 {\r
1253 BaseType_t xEntryTimeSet = pdFALSE;\r
1254 TimeOut_t xTimeOut;\r
1255 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1256 \r
1257         /* Check the pointer is not NULL. */\r
1258         configASSERT( ( pxQueue ) );\r
1259 \r
1260         /* The buffer into which data is received can only be NULL if the data size\r
1261         is zero (so no data is copied into the buffer). */\r
1262         configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1263 \r
1264         /* Cannot block if the scheduler is suspended. */\r
1265         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )\r
1266         {\r
1267                 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );\r
1268         }\r
1269         #endif\r
1270 \r
1271 \r
1272         /* This function relaxes the coding standard somewhat to allow return\r
1273         statements within the function itself.  This is done in the interest\r
1274         of execution time efficiency. */\r
1275 \r
1276         for( ;; )\r
1277         {\r
1278                 taskENTER_CRITICAL();\r
1279                 {\r
1280                         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
1281 \r
1282                         /* Is there data in the queue now?  To be running the calling task\r
1283                         must be the highest priority task wanting to access the queue. */\r
1284                         if( uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1285                         {\r
1286                                 /* Data available, remove one item. */\r
1287                                 prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1288                                 traceQUEUE_RECEIVE( pxQueue );\r
1289                                 pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;\r
1290 \r
1291                                 /* There is now space in the queue, were any tasks waiting to\r
1292                                 post to the queue?  If so, unblock the highest priority waiting\r
1293                                 task. */\r
1294                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1295                                 {\r
1296                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1297                                         {\r
1298                                                 queueYIELD_IF_USING_PREEMPTION();\r
1299                                         }\r
1300                                         else\r
1301                                         {\r
1302                                                 mtCOVERAGE_TEST_MARKER();\r
1303                                         }\r
1304                                 }\r
1305                                 else\r
1306                                 {\r
1307                                         mtCOVERAGE_TEST_MARKER();\r
1308                                 }\r
1309 \r
1310                                 taskEXIT_CRITICAL();\r
1311                                 return pdPASS;\r
1312                         }\r
1313                         else\r
1314                         {\r
1315                                 if( xTicksToWait == ( TickType_t ) 0 )\r
1316                                 {\r
1317                                         /* The queue was empty and no block time is specified (or\r
1318                                         the block time has expired) so leave now. */\r
1319                                         taskEXIT_CRITICAL();\r
1320                                         traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1321                                         return errQUEUE_EMPTY;\r
1322                                 }\r
1323                                 else if( xEntryTimeSet == pdFALSE )\r
1324                                 {\r
1325                                         /* The queue was empty and a block time was specified so\r
1326                                         configure the timeout structure. */\r
1327                                         vTaskInternalSetTimeOutState( &xTimeOut );\r
1328                                         xEntryTimeSet = pdTRUE;\r
1329                                 }\r
1330                                 else\r
1331                                 {\r
1332                                         /* Entry time was already set. */\r
1333                                         mtCOVERAGE_TEST_MARKER();\r
1334                                 }\r
1335                         }\r
1336                 }\r
1337                 taskEXIT_CRITICAL();\r
1338 \r
1339                 /* Interrupts and other tasks can send to and receive from the queue\r
1340                 now the critical section has been exited. */\r
1341 \r
1342                 vTaskSuspendAll();\r
1343                 prvLockQueue( pxQueue );\r
1344 \r
1345                 /* Update the timeout state to see if it has expired yet. */\r
1346                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )\r
1347                 {\r
1348                         /* The timeout has not expired.  If the queue is still empty place\r
1349                         the task on the list of tasks waiting to receive from the queue. */\r
1350                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1351                         {\r
1352                                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );\r
1353                                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );\r
1354                                 prvUnlockQueue( pxQueue );\r
1355                                 if( xTaskResumeAll() == pdFALSE )\r
1356                                 {\r
1357                                         portYIELD_WITHIN_API();\r
1358                                 }\r
1359                                 else\r
1360                                 {\r
1361                                         mtCOVERAGE_TEST_MARKER();\r
1362                                 }\r
1363                         }\r
1364                         else\r
1365                         {\r
1366                                 /* The queue contains data again.  Loop back to try and read the\r
1367                                 data. */\r
1368                                 prvUnlockQueue( pxQueue );\r
1369                                 ( void ) xTaskResumeAll();\r
1370                         }\r
1371                 }\r
1372                 else\r
1373                 {\r
1374                         /* Timed out.  If there is no data in the queue exit, otherwise loop\r
1375                         back and attempt to read the data. */\r
1376                         prvUnlockQueue( pxQueue );\r
1377                         ( void ) xTaskResumeAll();\r
1378 \r
1379                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1380                         {\r
1381                                 traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1382                                 return errQUEUE_EMPTY;\r
1383                         }\r
1384                         else\r
1385                         {\r
1386                                 mtCOVERAGE_TEST_MARKER();\r
1387                         }\r
1388                 }\r
1389         }\r
1390 }\r
1391 /*-----------------------------------------------------------*/\r
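\r
/* Illustrative usage sketch - not part of the kernel.  A task blocking on a\r
queue for up to 100ms.  The names vConsumerTask, xExampleQueue and\r
vProcessValue() are hypothetical.\r
\r
        void vConsumerTask( void *pvParameters )\r
        {\r
        uint32_t ulReceivedValue;\r
\r
                for( ;; )\r
                {\r
                        if( xQueueReceive( xExampleQueue, &ulReceivedValue, pdMS_TO_TICKS( 100 ) ) == pdPASS )\r
                        {\r
                                vProcessValue( ulReceivedValue );\r
                        }\r
                        else\r
                        {\r
                                // Nothing arrived within 100ms - errQUEUE_EMPTY was returned.\r
                        }\r
                }\r
        }\r
*/\r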
1392 \r
1393 BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait )\r
1394 {\r
1395 BaseType_t xEntryTimeSet = pdFALSE;\r
1396 TimeOut_t xTimeOut;\r
1397 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1398 \r
1399 #if( configUSE_MUTEXES == 1 )\r
1400         BaseType_t xInheritanceOccurred = pdFALSE;\r
1401 #endif\r
1402 \r
1403         /* Check the queue pointer is not NULL. */\r
1404         configASSERT( ( pxQueue ) );\r
1405 \r
1406         /* Check this really is a semaphore, in which case the item size will be\r
1407         0. */\r
1408         configASSERT( pxQueue->uxItemSize == 0 );\r
1409 \r
1410         /* Cannot block if the scheduler is suspended. */\r
1411         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )\r
1412         {\r
1413                 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );\r
1414         }\r
1415         #endif\r
1416 \r
1417 \r
1418         /* This function relaxes the coding standard somewhat to allow return\r
1419         statements within the function itself.  This is done in the interest\r
1420         of execution time efficiency. */\r
1421 \r
1422         for( ;; )\r
1423         {\r
1424                 taskENTER_CRITICAL();\r
1425                 {\r
1426                         /* Semaphores are queues with an item size of 0, and where the\r
1427                         number of messages in the queue is the semaphore's count value. */\r
1428                         const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;\r
1429 \r
1430                         /* Is there data in the queue now?  To be running the calling task\r
1431                         must be the highest priority task wanting to access the queue. */\r
1432                         if( uxSemaphoreCount > ( UBaseType_t ) 0 )\r
1433                         {\r
1434                                 traceQUEUE_RECEIVE( pxQueue );\r
1435 \r
1436                                 /* Semaphores are queues with a data size of zero and where the\r
1437                                 messages waiting is the semaphore's count.  Reduce the count. */\r
1438                                 pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1;\r
1439 \r
1440                                 #if ( configUSE_MUTEXES == 1 )\r
1441                                 {\r
1442                                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1443                                         {\r
1444                                                 /* Record the information required to implement\r
1445                                                 priority inheritance should it become necessary. */\r
1446                                                 pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */\r
1447                                         }\r
1448                                         else\r
1449                                         {\r
1450                                                 mtCOVERAGE_TEST_MARKER();\r
1451                                         }\r
1452                                 }\r
1453                                 #endif /* configUSE_MUTEXES */\r
1454 \r
1455                                 /* Check to see if other tasks are blocked waiting to give the\r
1456                                 semaphore, and if so, unblock the highest priority such task. */\r
1457                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1458                                 {\r
1459                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1460                                         {\r
1461                                                 queueYIELD_IF_USING_PREEMPTION();\r
1462                                         }\r
1463                                         else\r
1464                                         {\r
1465                                                 mtCOVERAGE_TEST_MARKER();\r
1466                                         }\r
1467                                 }\r
1468                                 else\r
1469                                 {\r
1470                                         mtCOVERAGE_TEST_MARKER();\r
1471                                 }\r
1472 \r
1473                                 taskEXIT_CRITICAL();\r
1474                                 return pdPASS;\r
1475                         }\r
1476                         else\r
1477                         {\r
1478                                 if( xTicksToWait == ( TickType_t ) 0 )\r
1479                                 {\r
1480                                         /* For inheritance to have occurred there must have been an\r
1481                                         initial timeout, and an adjusted timeout cannot become 0, as\r
1482                                         if it were 0 the function would have exited. */\r
1483                                         #if( configUSE_MUTEXES == 1 )\r
1484                                         {\r
1485                                                 configASSERT( xInheritanceOccurred == pdFALSE );\r
1486                                         }\r
1487                                         #endif /* configUSE_MUTEXES */\r
1488 \r
1489                                         /* The semaphore count was 0 and no block time is specified\r
1490                                         (or the block time has expired) so exit now. */\r
1491                                         taskEXIT_CRITICAL();\r
1492                                         traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1493                                         return errQUEUE_EMPTY;\r
1494                                 }\r
1495                                 else if( xEntryTimeSet == pdFALSE )\r
1496                                 {\r
1497                                         /* The semaphore count was 0 and a block time was specified\r
1498                                         so configure the timeout structure ready to block. */\r
1499                                         vTaskInternalSetTimeOutState( &xTimeOut );\r
1500                                         xEntryTimeSet = pdTRUE;\r
1501                                 }\r
1502                                 else\r
1503                                 {\r
1504                                         /* Entry time was already set. */\r
1505                                         mtCOVERAGE_TEST_MARKER();\r
1506                                 }\r
1507                         }\r
1508                 }\r
1509                 taskEXIT_CRITICAL();\r
1510 \r
1511                 /* Interrupts and other tasks can give to and take from the semaphore\r
1512                 now the critical section has been exited. */\r
1513 \r
1514                 vTaskSuspendAll();\r
1515                 prvLockQueue( pxQueue );\r
1516 \r
1517                 /* Update the timeout state to see if it has expired yet. */\r
1518                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )\r
1519                 {\r
1520                         /* A block time is specified and not expired.  If the semaphore\r
1521                         count is 0 then enter the Blocked state to wait for a semaphore to\r
1522                         become available.  As semaphores are implemented with queues the\r
1523                         queue being empty is equivalent to the semaphore count being 0. */\r
1524                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1525                         {\r
1526                                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );\r
1527 \r
1528                                 #if ( configUSE_MUTEXES == 1 )\r
1529                                 {\r
1530                                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1531                                         {\r
1532                                                 taskENTER_CRITICAL();\r
1533                                                 {\r
1534                                                         xInheritanceOccurred = xTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );\r
1535                                                 }\r
1536                                                 taskEXIT_CRITICAL();\r
1537                                         }\r
1538                                         else\r
1539                                         {\r
1540                                                 mtCOVERAGE_TEST_MARKER();\r
1541                                         }\r
1542                                 }\r
1543                                 #endif\r
1544 \r
1545                                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );\r
1546                                 prvUnlockQueue( pxQueue );\r
1547                                 if( xTaskResumeAll() == pdFALSE )\r
1548                                 {\r
1549                                         portYIELD_WITHIN_API();\r
1550                                 }\r
1551                                 else\r
1552                                 {\r
1553                                         mtCOVERAGE_TEST_MARKER();\r
1554                                 }\r
1555                         }\r
1556                         else\r
1557                         {\r
1558                                 /* There was no timeout and the semaphore count was not 0, so\r
1559                                 attempt to take the semaphore again. */\r
1560                                 prvUnlockQueue( pxQueue );\r
1561                                 ( void ) xTaskResumeAll();\r
1562                         }\r
1563                 }\r
1564                 else\r
1565                 {\r
1566                         /* Timed out. */\r
1567                         prvUnlockQueue( pxQueue );\r
1568                         ( void ) xTaskResumeAll();\r
1569 \r
1570                         /* If the semaphore count is 0 exit now as the timeout has\r
1571                         expired.  Otherwise return to attempt to take the semaphore that is\r
1572                         known to be available.  As semaphores are implemented by queues the\r
1573                         queue being empty is equivalent to the semaphore count being 0. */\r
1574                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1575                         {\r
1576                                 #if ( configUSE_MUTEXES == 1 )\r
1577                                 {\r
1578                                         /* xInheritanceOccurred could only have been set if\r
1579                                         pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to\r
1580                                         test the mutex type again to check it is actually a mutex. */\r
1581                                         if( xInheritanceOccurred != pdFALSE )\r
1582                                         {\r
1583                                                 taskENTER_CRITICAL();\r
1584                                                 {\r
1585                                                         UBaseType_t uxHighestWaitingPriority;\r
1586 \r
1587                                                         /* This task blocking on the mutex caused another\r
1588                                                         task to inherit this task's priority.  Now that this\r
1589                                                         task has timed out the priority should be disinherited\r
1590                                                         again, but only as low as the next highest priority\r
1591                                                         task that is waiting for the same mutex. */\r
1592                                                         uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );\r
1593                                                         vTaskPriorityDisinheritAfterTimeout( ( void * ) pxQueue->pxMutexHolder, uxHighestWaitingPriority );\r
1594                                                 }\r
1595                                                 taskEXIT_CRITICAL();\r
1596                                         }\r
1597                                 }\r
1598                                 #endif /* configUSE_MUTEXES */\r
1599 \r
1600                                 traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1601                                 return errQUEUE_EMPTY;\r
1602                         }\r
1603                         else\r
1604                         {\r
1605                                 mtCOVERAGE_TEST_MARKER();\r
1606                         }\r
1607                 }\r
1608         }\r
1609 }\r
1610 /*-----------------------------------------------------------*/\r
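\r
/* Illustrative usage sketch - not part of the kernel.  Taking a mutex with a\r
timeout: xSemaphoreTake() in semphr.h calls xQueueSemaphoreTake(), so if the\r
current holder has a lower priority than the caller it temporarily inherits\r
the caller's priority, as implemented above.  The names xExampleMutex and\r
prvAccessSharedResource() are hypothetical.\r
\r
        if( xSemaphoreTake( xExampleMutex, pdMS_TO_TICKS( 10 ) ) == pdPASS )\r
        {\r
                prvAccessSharedResource();\r
                ( void ) xSemaphoreGive( xExampleMutex );\r
        }\r
        else\r
        {\r
                // The mutex could not be obtained within 10ms.\r
        }\r
*/\r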
1611 \r
1612 BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )\r
1613 {\r
1614 BaseType_t xEntryTimeSet = pdFALSE;\r
1615 TimeOut_t xTimeOut;\r
1616 int8_t *pcOriginalReadPosition;\r
1617 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1618 \r
1619         /* Check the pointer is not NULL. */\r
1620         configASSERT( ( pxQueue ) );\r
1621 \r
1622         /* The buffer into which data is received can only be NULL if the data size\r
1623         is zero (so no data is copied into the buffer). */\r
1624         configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1625 \r
1626         /* Cannot block if the scheduler is suspended. */\r
1627         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )\r
1628         {\r
1629                 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );\r
1630         }\r
1631         #endif\r
1632 \r
1633 \r
1634         /* This function relaxes the coding standard somewhat to allow return\r
1635         statements within the function itself.  This is done in the interest\r
1636         of execution time efficiency. */\r
1637 \r
1638         for( ;; )\r
1639         {\r
1640                 taskENTER_CRITICAL();\r
1641                 {\r
1642                         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
1643 \r
1644                         /* Is there data in the queue now?  To be running the calling task\r
1645                         must be the highest priority task wanting to access the queue. */\r
1646                         if( uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1647                         {\r
1648                                 /* Remember the read position so it can be reset after the data\r
1649                                 is read from the queue as this function is only peeking the\r
1650                                 data, not removing it. */\r
1651                                 pcOriginalReadPosition = pxQueue->u.pcReadFrom;\r
1652 \r
1653                                 prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1654                                 traceQUEUE_PEEK( pxQueue );\r
1655 \r
1656                                 /* The data is not being removed, so reset the read pointer. */\r
1657                                 pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
1658 \r
1659                                 /* The data is being left in the queue, so see if there are\r
1660                                 any other tasks waiting for the data. */\r
1661                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1662                                 {\r
1663                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1664                                         {\r
1665                                                 /* The task waiting has a higher priority than this task. */\r
1666                                                 queueYIELD_IF_USING_PREEMPTION();\r
1667                                         }\r
1668                                         else\r
1669                                         {\r
1670                                                 mtCOVERAGE_TEST_MARKER();\r
1671                                         }\r
1672                                 }\r
1673                                 else\r
1674                                 {\r
1675                                         mtCOVERAGE_TEST_MARKER();\r
1676                                 }\r
1677 \r
1678                                 taskEXIT_CRITICAL();\r
1679                                 return pdPASS;\r
1680                         }\r
1681                         else\r
1682                         {\r
1683                                 if( xTicksToWait == ( TickType_t ) 0 )\r
1684                                 {\r
1685                                         /* The queue was empty and no block time is specified (or\r
1686                                         the block time has expired) so leave now. */\r
1687                                         taskEXIT_CRITICAL();\r
1688                                         traceQUEUE_PEEK_FAILED( pxQueue );\r
1689                                         return errQUEUE_EMPTY;\r
1690                                 }\r
1691                                 else if( xEntryTimeSet == pdFALSE )\r
1692                                 {\r
1693                                         /* The queue was empty and a block time was specified so\r
1694                                         configure the timeout structure ready to enter the blocked\r
1695                                         state. */\r
1696                                         vTaskInternalSetTimeOutState( &xTimeOut );\r
1697                                         xEntryTimeSet = pdTRUE;\r
1698                                 }\r
1699                                 else\r
1700                                 {\r
1701                                         /* Entry time was already set. */\r
1702                                         mtCOVERAGE_TEST_MARKER();\r
1703                                 }\r
1704                         }\r
1705                 }\r
1706                 taskEXIT_CRITICAL();\r
1707 \r
1708                 /* Interrupts and other tasks can send to and receive from the queue\r
1709                 now the critical section has been exited. */\r
1710 \r
1711                 vTaskSuspendAll();\r
1712                 prvLockQueue( pxQueue );\r
1713 \r
1714                 /* Update the timeout state to see if it has expired yet. */\r
1715                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )\r
1716                 {\r
1717                         /* Timeout has not expired yet, check to see if there is data in the\r
1718                         queue now, and if not enter the Blocked state to wait for data. */\r
1719                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1720                         {\r
1721                                 traceBLOCKING_ON_QUEUE_PEEK( pxQueue );\r
1722                                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );\r
1723                                 prvUnlockQueue( pxQueue );\r
1724                                 if( xTaskResumeAll() == pdFALSE )\r
1725                                 {\r
1726                                         portYIELD_WITHIN_API();\r
1727                                 }\r
1728                                 else\r
1729                                 {\r
1730                                         mtCOVERAGE_TEST_MARKER();\r
1731                                 }\r
1732                         }\r
1733                         else\r
1734                         {\r
1735                                 /* There is data in the queue now, so don't enter the blocked\r
1736                                 state; instead return to try and obtain the data. */\r
1737                                 prvUnlockQueue( pxQueue );\r
1738                                 ( void ) xTaskResumeAll();\r
1739                         }\r
1740                 }\r
1741                 else\r
1742                 {\r
1743                         /* The timeout has expired.  If there is still no data in the queue\r
1744                         exit, otherwise go back and try to read the data again. */\r
1745                         prvUnlockQueue( pxQueue );\r
1746                         ( void ) xTaskResumeAll();\r
1747 \r
1748                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1749                         {\r
1750                                 traceQUEUE_PEEK_FAILED( pxQueue );\r
1751                                 return errQUEUE_EMPTY;\r
1752                         }\r
1753                         else\r
1754                         {\r
1755                                 mtCOVERAGE_TEST_MARKER();\r
1756                         }\r
1757                 }\r
1758         }\r
1759 }\r
1760 /*-----------------------------------------------------------*/\r
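\r
/* Illustrative usage sketch - not part of the kernel.  Peeking copies the\r
item at the head of the queue into the buffer but leaves it queued, so a\r
subsequent xQueueReceive() returns the same item.  The name xExampleQueue is\r
hypothetical.\r
\r
        uint32_t ulValue;\r
\r
        if( xQueuePeek( xExampleQueue, &ulValue, pdMS_TO_TICKS( 10 ) ) == pdPASS )\r
        {\r
                // ulValue holds a copy of the head item, which is still in the queue.\r
        }\r
*/\r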
1761 \r
1762 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )\r
1763 {\r
1764 BaseType_t xReturn;\r
1765 UBaseType_t uxSavedInterruptStatus;\r
1766 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1767 \r
1768         configASSERT( pxQueue );\r
1769         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1770 \r
1771         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1772         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1773         above the maximum system call priority are kept permanently enabled, even\r
1774         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1775         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1776         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1777         failure if a FreeRTOS API function is called from an interrupt that has been\r
1778         assigned a priority above the configured maximum system call priority.\r
1779         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1780         that have been assigned a priority at or (logically) below the maximum\r
1781         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1782         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1783         More information (albeit Cortex-M specific) is provided on the following\r
1784         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1785         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1786 \r
1787         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1788         {\r
1789                 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
1790 \r
1791                 /* Cannot block in an ISR, so check there is data available. */\r
1792                 if( uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1793                 {\r
1794                         const int8_t cRxLock = pxQueue->cRxLock;\r
1795 \r
1796                         traceQUEUE_RECEIVE_FROM_ISR( pxQueue );\r
1797 \r
1798                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1799                         pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;\r
1800 \r
1801                         /* If the queue is locked the event list will not be modified.\r
1802                         Instead update the lock count so the task that unlocks the queue\r
1803                         will know that an ISR has removed data while the queue was\r
1804                         locked. */\r
1805                         if( cRxLock == queueUNLOCKED )\r
1806                         {\r
1807                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1808                                 {\r
1809                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1810                                         {\r
1811                                                 /* The task waiting has a higher priority than us so\r
1812                                                 force a context switch. */\r
1813                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1814                                                 {\r
1815                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1816                                                 }\r
1817                                                 else\r
1818                                                 {\r
1819                                                         mtCOVERAGE_TEST_MARKER();\r
1820                                                 }\r
1821                                         }\r
1822                                         else\r
1823                                         {\r
1824                                                 mtCOVERAGE_TEST_MARKER();\r
1825                                         }\r
1826                                 }\r
1827                                 else\r
1828                                 {\r
1829                                         mtCOVERAGE_TEST_MARKER();\r
1830                                 }\r
1831                         }\r
1832                         else\r
1833                         {\r
1834                                 /* Increment the lock count so the task that unlocks the queue\r
1835                                 knows that data was removed while it was locked. */\r
1836                                 pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );\r
1837                         }\r
1838 \r
1839                         xReturn = pdPASS;\r
1840                 }\r
1841                 else\r
1842                 {\r
1843                         xReturn = pdFAIL;\r
1844                         traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );\r
1845                 }\r
1846         }\r
1847         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1848 \r
1849         return xReturn;\r
1850 }\r
1851 /*-----------------------------------------------------------*/\r
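\r
/* Illustrative usage sketch - not part of the kernel.  Draining a queue from\r
an ISR: the call cannot block, so pdFAIL indicates the queue is empty.  The\r
names vExampleISR, xExampleQueue and vProcessValue() are hypothetical.\r
\r
        void vExampleISR( void )\r
        {\r
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
        uint32_t ulValue;\r
\r
                while( xQueueReceiveFromISR( xExampleQueue, &ulValue, &xHigherPriorityTaskWoken ) == pdPASS )\r
                {\r
                        vProcessValue( ulValue );\r
                }\r
\r
                portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
        }\r
*/\r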
1852 \r
1853 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,  void * const pvBuffer )\r
1854 {\r
1855 BaseType_t xReturn;\r
1856 UBaseType_t uxSavedInterruptStatus;\r
1857 int8_t *pcOriginalReadPosition;\r
1858 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1859 \r
1860         configASSERT( pxQueue );\r
1861         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1862         configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */\r
1863 \r
1864         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1865         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1866         above the maximum system call priority are kept permanently enabled, even\r
1867         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1868         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1869         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1870         failure if a FreeRTOS API function is called from an interrupt that has been\r
1871         assigned a priority above the configured maximum system call priority.\r
1872         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1873         that have been assigned a priority at or (logically) below the maximum\r
1874         system call interrupt priority.  FreeRTOS maintains a separate interrupt\r
1875         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1876         More information (albeit Cortex-M specific) is provided on the following\r
1877         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1878         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1879 \r
1880         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1881         {\r
1882                 /* Cannot block in an ISR, so check there is data available. */\r
1883                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1884                 {\r
1885                         traceQUEUE_PEEK_FROM_ISR( pxQueue );\r
1886 \r
1887                         /* Remember the read position so it can be reset as nothing is\r
1888                         actually being removed from the queue. */\r
1889                         pcOriginalReadPosition = pxQueue->u.pcReadFrom;\r
1890                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1891                         pxQueue->u.pcReadFrom = pcOriginalReadPosition;\r
1892 \r
1893                         xReturn = pdPASS;\r
1894                 }\r
1895                 else\r
1896                 {\r
1897                         xReturn = pdFAIL;\r
1898                         traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );\r
1899                 }\r
1900         }\r
1901         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1902 \r
1903         return xReturn;\r
1904 }\r
1905 /*-----------------------------------------------------------*/\r
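\r
/* Illustrative usage sketch - not part of the kernel.  As with xQueuePeek(),\r
the read pointer is restored after the copy, so the item remains at the head\r
of the queue.  The name xExampleQueue is hypothetical.\r
\r
        uint32_t ulValue;\r
\r
        if( xQueuePeekFromISR( xExampleQueue, &ulValue ) == pdPASS )\r
        {\r
                // ulValue holds a copy of the head item; nothing was removed.\r
        }\r
*/\r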
1906 \r
1907 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )\r
1908 {\r
1909 UBaseType_t uxReturn;\r
1910 \r
1911         configASSERT( xQueue );\r
1912 \r
1913         taskENTER_CRITICAL();\r
1914         {\r
1915                 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1916         }\r
1917         taskEXIT_CRITICAL();\r
1918 \r
1919         return uxReturn;\r
1920 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1921 /*-----------------------------------------------------------*/\r
1922 \r
1923 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )\r
1924 {\r
1925 UBaseType_t uxReturn;\r
1926 Queue_t *pxQueue;\r
1927 \r
1928         pxQueue = ( Queue_t * ) xQueue;\r
1929         configASSERT( pxQueue );\r
1930 \r
1931         taskENTER_CRITICAL();\r
1932         {\r
1933                 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;\r
1934         }\r
1935         taskEXIT_CRITICAL();\r
1936 \r
1937         return uxReturn;\r
1938 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1939 /*-----------------------------------------------------------*/\r
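\r
/* Illustrative note - not part of the kernel.  For any queue the two values\r
satisfy uxQueueMessagesWaiting() + uxQueueSpacesAvailable() == uxLength, but\r
each call takes its own critical section, so the values are independent\r
snapshots - the identity only holds if nothing sends to or receives from the\r
queue between the two calls.  The name xExampleQueue is hypothetical.\r
\r
        UBaseType_t uxUsed = uxQueueMessagesWaiting( xExampleQueue );\r
        UBaseType_t uxFree = uxQueueSpacesAvailable( xExampleQueue );\r
*/\r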
1940 \r
1941 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )\r
1942 {\r
1943 UBaseType_t uxReturn;\r
1944 \r
1945         configASSERT( xQueue );\r
1946 \r
1947         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1948 \r
1949         return uxReturn;\r
1950 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1951 /*-----------------------------------------------------------*/\r
1952 \r
1953 void vQueueDelete( QueueHandle_t xQueue )\r
1954 {\r
1955 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1956 \r
1957         configASSERT( pxQueue );\r
1958         traceQUEUE_DELETE( pxQueue );\r
1959 \r
1960         #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
1961         {\r
1962                 vQueueUnregisterQueue( pxQueue );\r
1963         }\r
1964         #endif\r
1965 \r
1966         #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )\r
1967         {\r
1968                 /* The queue can only have been allocated dynamically - free it\r
1969                 again. */\r
1970                 vPortFree( pxQueue );\r
1971         }\r
1972         #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )\r
1973         {\r
1974                 /* The queue could have been allocated statically or dynamically, so\r
1975                 check before attempting to free the memory. */\r
1976                 if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )\r
1977                 {\r
1978                         vPortFree( pxQueue );\r
1979                 }\r
1980                 else\r
1981                 {\r
1982                         mtCOVERAGE_TEST_MARKER();\r
1983                 }\r
1984         }\r
1985         #else\r
1986         {\r
1987                 /* The queue must have been statically allocated, so is not going to be\r
1988                 deleted.  Avoid compiler warnings about the unused parameter. */\r
1989                 ( void ) pxQueue;\r
1990         }\r
1991         #endif /* configSUPPORT_DYNAMIC_ALLOCATION */\r
1992 }\r
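\r
/* Usage sketch (illustrative only, not part of the kernel): deleting a\r
dynamically allocated queue once it is no longer required.  Note nothing in\r
vQueueDelete() unblocks tasks that are still blocked on the queue, so the\r
application must ensure no task is blocked on it before deleting.\r
\r
        QueueHandle_t xExampleQueue = xQueueCreate( 5, sizeof( uint32_t ) );\r
\r
        if( xExampleQueue != NULL )\r
        {\r
            ...use the queue...\r
            vQueueDelete( xExampleQueue );\r
        }\r
\r
   As the code above shows, vQueueDelete() also removes the queue from the\r
   registry when configQUEUE_REGISTRY_SIZE > 0, so no separate\r
   vQueueUnregisterQueue() call is needed. */\r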
1993 /*-----------------------------------------------------------*/\r
1994 \r
1995 #if ( configUSE_TRACE_FACILITY == 1 )\r
1996 \r
1997         UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )\r
1998         {\r
1999                 return ( ( Queue_t * ) xQueue )->uxQueueNumber;\r
2000         }\r
2001 \r
2002 #endif /* configUSE_TRACE_FACILITY */\r
2003 /*-----------------------------------------------------------*/\r
2004 \r
2005 #if ( configUSE_TRACE_FACILITY == 1 )\r
2006 \r
2007         void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )\r
2008         {\r
2009                 ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;\r
2010         }\r
2011 \r
2012 #endif /* configUSE_TRACE_FACILITY */\r
2013 /*-----------------------------------------------------------*/\r
2014 \r
2015 #if ( configUSE_TRACE_FACILITY == 1 )\r
2016 \r
2017         uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )\r
2018         {\r
2019                 return ( ( Queue_t * ) xQueue )->ucQueueType;\r
2020         }\r
2021 \r
2022 #endif /* configUSE_TRACE_FACILITY */\r
2023 /*-----------------------------------------------------------*/\r
2024 \r
2025 #if( configUSE_MUTEXES == 1 )\r
2026 \r
2027         static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )\r
2028         {\r
2029         UBaseType_t uxHighestPriorityOfWaitingTasks;\r
2030 \r
2031                 /* If a task waiting for a mutex causes the mutex holder to inherit a\r
2032                 priority, but the waiting task times out, then the holder should\r
2033                 disinherit the priority - but only down to the highest priority of any\r
2034                 other tasks that are waiting for the same mutex.  For this purpose,\r
2035                 return the priority of the highest priority task that is waiting for the\r
2036                 mutex. */\r
2037                 if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0 )\r
2038                 {\r
2039                         uxHighestPriorityOfWaitingTasks = configMAX_PRIORITIES - listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) );\r
2040                 }\r
2041                 else\r
2042                 {\r
2043                         uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;\r
2044                 }\r
2045 \r
2046                 return uxHighestPriorityOfWaitingTasks;\r
2047         }\r
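\r
        /* Worked example of the expression above: event list item values\r
        hold ( configMAX_PRIORITIES - task priority ), so the list orders\r
        the highest priority waiter first.  If configMAX_PRIORITIES is 5 and\r
        the value at the head of xTasksWaitingToReceive is 2, the highest\r
        priority task still waiting has priority 5 - 2 = 3, and 3 is the\r
        floor the mutex holder is disinherited down to. */\r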
2048 \r
2049 #endif /* configUSE_MUTEXES */\r
2050 /*-----------------------------------------------------------*/\r
2051 \r
2052 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )\r
2053 {\r
2054 BaseType_t xReturn = pdFALSE;\r
2055 UBaseType_t uxMessagesWaiting;\r
2056 \r
2057         /* This function is called from a critical section. */\r
2058 \r
2059         uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
2060 \r
2061         if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )\r
2062         {\r
2063                 #if ( configUSE_MUTEXES == 1 )\r
2064                 {\r
2065                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
2066                         {\r
2067                                 /* The mutex is no longer being held. */\r
2068                                 xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );\r
2069                                 pxQueue->pxMutexHolder = NULL;\r
2070                         }\r
2071                         else\r
2072                         {\r
2073                                 mtCOVERAGE_TEST_MARKER();\r
2074                         }\r
2075                 }\r
2076                 #endif /* configUSE_MUTEXES */\r
2077         }\r
2078         else if( xPosition == queueSEND_TO_BACK )\r
2079         {\r
2080                 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */\r
2081                 pxQueue->pcWriteTo += pxQueue->uxItemSize;\r
2082                 if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */\r
2083                 {\r
2084                         pxQueue->pcWriteTo = pxQueue->pcHead;\r
2085                 }\r
2086                 else\r
2087                 {\r
2088                         mtCOVERAGE_TEST_MARKER();\r
2089                 }\r
2090         }\r
2091         else\r
2092         {\r
2093                 ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
2094                 pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;\r
2095                 if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */\r
2096                 {\r
2097                         pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );\r
2098                 }\r
2099                 else\r
2100                 {\r
2101                         mtCOVERAGE_TEST_MARKER();\r
2102                 }\r
2103 \r
2104                 if( xPosition == queueOVERWRITE )\r
2105                 {\r
2106                         if( uxMessagesWaiting > ( UBaseType_t ) 0 )\r
2107                         {\r
2108                                 /* An item is not being added but overwritten, so subtract\r
2109                                 one from the recorded number of items in the queue so that,\r
2110                                 when the count is incremented again below, the recorded\r
2111                                 number of items remains correct. */\r
2112                                 --uxMessagesWaiting;\r
2113                         }\r
2114                         else\r
2115                         {\r
2116                                 mtCOVERAGE_TEST_MARKER();\r
2117                         }\r
2118                 }\r
2119                 else\r
2120                 {\r
2121                         mtCOVERAGE_TEST_MARKER();\r
2122                 }\r
2123         }\r
2124 \r
2125         pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;\r
2126 \r
2127         return xReturn;\r
2128 }\r
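\r
/* Worked example of the circular buffer arithmetic above (illustrative\r
only): for a queue created with uxLength = 3 and uxItemSize = 4, pcHead\r
points at offset 0 of the storage area and pcTail at offset 12.  A send to\r
the back that copies into offset 8 advances pcWriteTo to offset 12, which is\r
>= pcTail, so pcWriteTo wraps to pcHead.  A send to the front moves\r
u.pcReadFrom backwards instead, wrapping it to ( pcTail - uxItemSize ), i.e.\r
offset 8, when it would fall below pcHead. */\r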
2129 /*-----------------------------------------------------------*/\r
2130 \r
2131 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )\r
2132 {\r
2133         if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )\r
2134         {\r
2135                 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
2136                 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */\r
2137                 {\r
2138                         pxQueue->u.pcReadFrom = pxQueue->pcHead;\r
2139                 }\r
2140                 else\r
2141                 {\r
2142                         mtCOVERAGE_TEST_MARKER();\r
2143                 }\r
2144                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */\r
2145         }\r
2146 }\r
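\r
/* Note on the order of operations above: u.pcReadFrom points at the item\r
that was read last, not at the next unread item, so it is advanced (and\r
wrapped if necessary) before the copy is performed.  This is why a freshly\r
reset queue has u.pcReadFrom pointing at the last physical slot - the first\r
receive then advances it back around to pcHead. */\r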
2147 /*-----------------------------------------------------------*/\r
2148 \r
2149 static void prvUnlockQueue( Queue_t * const pxQueue )\r
2150 {\r
2151         /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */\r
2152 \r
2153         /* The lock counts contain the number of extra data items placed on or\r
2154         removed from the queue while the queue was locked.  When a queue is\r
2155         locked items can be added or removed, but the event lists cannot be\r
2156         updated. */\r
2157         taskENTER_CRITICAL();\r
2158         {\r
2159                 int8_t cTxLock = pxQueue->cTxLock;\r
2160 \r
2161                 /* See if data was added to the queue while it was locked. */\r
2162                 while( cTxLock > queueLOCKED_UNMODIFIED )\r
2163                 {\r
2164                         /* Data was posted while the queue was locked.  Are any tasks\r
2165                         blocked waiting for data to become available? */\r
2166                         #if ( configUSE_QUEUE_SETS == 1 )\r
2167                         {\r
2168                                 if( pxQueue->pxQueueSetContainer != NULL )\r
2169                                 {\r
2170                                         if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )\r
2171                                         {\r
2172                                                 /* The queue is a member of a queue set, and posting to\r
2173                                                 the queue set caused a higher priority task to unblock.\r
2174                                                 A context switch is required. */\r
2175                                                 vTaskMissedYield();\r
2176                                         }\r
2177                                         else\r
2178                                         {\r
2179                                                 mtCOVERAGE_TEST_MARKER();\r
2180                                         }\r
2181                                 }\r
2182                                 else\r
2183                                 {\r
2184                                         /* Tasks that are removed from the event list will get\r
2185                                         added to the pending ready list as the scheduler is still\r
2186                                         suspended. */\r
2187                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2188                                         {\r
2189                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2190                                                 {\r
2191                                                         /* The task waiting has a higher priority so record that a\r
2192                                                         context switch is required. */\r
2193                                                         vTaskMissedYield();\r
2194                                                 }\r
2195                                                 else\r
2196                                                 {\r
2197                                                         mtCOVERAGE_TEST_MARKER();\r
2198                                                 }\r
2199                                         }\r
2200                                         else\r
2201                                         {\r
2202                                                 break;\r
2203                                         }\r
2204                                 }\r
2205                         }\r
2206                         #else /* configUSE_QUEUE_SETS */\r
2207                         {\r
2208                                 /* Tasks that are removed from the event list will get added to\r
2209                                 the pending ready list as the scheduler is still suspended. */\r
2210                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2211                                 {\r
2212                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2213                                         {\r
2214                                                 /* The task waiting has a higher priority so record that\r
2215                                                 a context switch is required. */\r
2216                                                 vTaskMissedYield();\r
2217                                         }\r
2218                                         else\r
2219                                         {\r
2220                                                 mtCOVERAGE_TEST_MARKER();\r
2221                                         }\r
2222                                 }\r
2223                                 else\r
2224                                 {\r
2225                                         break;\r
2226                                 }\r
2227                         }\r
2228                         #endif /* configUSE_QUEUE_SETS */\r
2229 \r
2230                         --cTxLock;\r
2231                 }\r
2232 \r
2233                 pxQueue->cTxLock = queueUNLOCKED;\r
2234         }\r
2235         taskEXIT_CRITICAL();\r
2236 \r
2237         /* Do the same for the Rx lock. */\r
2238         taskENTER_CRITICAL();\r
2239         {\r
2240                 int8_t cRxLock = pxQueue->cRxLock;\r
2241 \r
2242                 while( cRxLock > queueLOCKED_UNMODIFIED )\r
2243                 {\r
2244                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
2245                         {\r
2246                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
2247                                 {\r
2248                                         vTaskMissedYield();\r
2249                                 }\r
2250                                 else\r
2251                                 {\r
2252                                         mtCOVERAGE_TEST_MARKER();\r
2253                                 }\r
2254 \r
2255                                 --cRxLock;\r
2256                         }\r
2257                         else\r
2258                         {\r
2259                                 break;\r
2260                         }\r
2261                 }\r
2262 \r
2263                 pxQueue->cRxLock = queueUNLOCKED;\r
2264         }\r
2265         taskEXIT_CRITICAL();\r
2266 }\r
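\r
/* Worked example of the lock counts (illustrative only): suppose an ISR\r
posts three items while the queue is locked.  Each post increments cTxLock\r
instead of touching the event lists, leaving cTxLock at 3, so the first loop\r
above iterates three times and wakes at most one receiving task per queued\r
post before resetting the count to queueUNLOCKED. */\r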
2267 /*-----------------------------------------------------------*/\r
2268 \r
2269 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )\r
2270 {\r
2271 BaseType_t xReturn;\r
2272 \r
2273         taskENTER_CRITICAL();\r
2274         {\r
2275                 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
2276                 {\r
2277                         xReturn = pdTRUE;\r
2278                 }\r
2279                 else\r
2280                 {\r
2281                         xReturn = pdFALSE;\r
2282                 }\r
2283         }\r
2284         taskEXIT_CRITICAL();\r
2285 \r
2286         return xReturn;\r
2287 }\r
2288 /*-----------------------------------------------------------*/\r
2289 \r
2290 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )\r
2291 {\r
2292 BaseType_t xReturn;\r
2293 \r
2294         configASSERT( xQueue );\r
2295         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
2296         {\r
2297                 xReturn = pdTRUE;\r
2298         }\r
2299         else\r
2300         {\r
2301                 xReturn = pdFALSE;\r
2302         }\r
2303 \r
2304         return xReturn;\r
2305 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
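\r
/* Usage sketch (illustrative only, not part of the kernel): draining a\r
queue from an ISR.  xExampleQueue is an assumed handle of a queue of\r
uint32_t items, vProcessItem() is a hypothetical application function, and\r
portYIELD_FROM_ISR() is used on the assumption the port provides it.\r
\r
        uint32_t ulItem;\r
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
\r
        while( xQueueIsQueueEmptyFromISR( xExampleQueue ) == pdFALSE )\r
        {\r
            ( void ) xQueueReceiveFromISR( xExampleQueue, &ulItem, &xHigherPriorityTaskWoken );\r
            vProcessItem( ulItem );\r
        }\r
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
*/\r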
2306 /*-----------------------------------------------------------*/\r
2307 \r
2308 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )\r
2309 {\r
2310 BaseType_t xReturn;\r
2311 \r
2312         taskENTER_CRITICAL();\r
2313         {\r
2314                 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )\r
2315                 {\r
2316                         xReturn = pdTRUE;\r
2317                 }\r
2318                 else\r
2319                 {\r
2320                         xReturn = pdFALSE;\r
2321                 }\r
2322         }\r
2323         taskEXIT_CRITICAL();\r
2324 \r
2325         return xReturn;\r
2326 }\r
2327 /*-----------------------------------------------------------*/\r
2328 \r
2329 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )\r
2330 {\r
2331 BaseType_t xReturn;\r
2332 \r
2333         configASSERT( xQueue );\r
2334         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )\r
2335         {\r
2336                 xReturn = pdTRUE;\r
2337         }\r
2338         else\r
2339         {\r
2340                 xReturn = pdFALSE;\r
2341         }\r
2342 \r
2343         return xReturn;\r
2344 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2345 /*-----------------------------------------------------------*/\r
2346 \r
2347 #if ( configUSE_CO_ROUTINES == 1 )\r
2348 \r
2349         BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )\r
2350         {\r
2351         BaseType_t xReturn;\r
2352         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2353 \r
2354                 /* If the queue is already full we may have to block.  A critical section\r
2355                 is required to prevent an interrupt removing something from the queue\r
2356                 between the check to see if the queue is full and blocking on the queue. */\r
2357                 portDISABLE_INTERRUPTS();\r
2358                 {\r
2359                         if( prvIsQueueFull( pxQueue ) != pdFALSE )\r
2360                         {\r
2361                                 /* The queue is full - do we want to block or just leave without\r
2362                                 posting? */\r
2363                                 if( xTicksToWait > ( TickType_t ) 0 )\r
2364                                 {\r
2365                                         /* As this is called from a co-routine we cannot block directly, but\r
2366                                         return indicating that we need to block. */\r
2367                                         vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );\r
2368                                         portENABLE_INTERRUPTS();\r
2369                                         return errQUEUE_BLOCKED;\r
2370                                 }\r
2371                                 else\r
2372                                 {\r
2373                                         portENABLE_INTERRUPTS();\r
2374                                         return errQUEUE_FULL;\r
2375                                 }\r
2376                         }\r
2377                 }\r
2378                 portENABLE_INTERRUPTS();\r
2379 \r
2380                 portDISABLE_INTERRUPTS();\r
2381                 {\r
2382                         if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
2383                         {\r
2384                                 /* There is room in the queue, copy the data into the queue. */\r
2385                                 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );\r
2386                                 xReturn = pdPASS;\r
2387 \r
2388                                 /* Were any co-routines waiting for data to become available? */\r
2389                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2390                                 {\r
2391                                         /* In this instance the co-routine could be placed directly\r
2392                                         into the ready list as we are within a critical section.\r
2393                                         Instead the same pending ready list mechanism is used as if\r
2394                                         the event were caused from within an interrupt. */\r
2395                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2396                                         {\r
2397                                                 /* The co-routine waiting has a higher priority so record\r
2398                                                 that a yield might be appropriate. */\r
2399                                                 xReturn = errQUEUE_YIELD;\r
2400                                         }\r
2401                                         else\r
2402                                         {\r
2403                                                 mtCOVERAGE_TEST_MARKER();\r
2404                                         }\r
2405                                 }\r
2406                                 else\r
2407                                 {\r
2408                                         mtCOVERAGE_TEST_MARKER();\r
2409                                 }\r
2410                         }\r
2411                         else\r
2412                         {\r
2413                                 xReturn = errQUEUE_FULL;\r
2414                         }\r
2415                 }\r
2416                 portENABLE_INTERRUPTS();\r
2417 \r
2418                 return xReturn;\r
2419         }\r
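\r
        /* Usage sketch (illustrative only, not part of the kernel):\r
        application code reaches xQueueCRSend() through the crQUEUE_SEND()\r
        macro in croutine.h, which consumes the errQUEUE_BLOCKED and\r
        errQUEUE_YIELD return values as part of the co-routine macro\r
        protocol.  xExampleQueue is an assumed handle; note co-routine local\r
        variables must be static to survive the yield points.\r
\r
            void vTxCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )\r
            {\r
            static uint32_t ulValueToPost = 0;\r
            static BaseType_t xResult;\r
\r
                crSTART( xHandle );\r
                for( ;; )\r
                {\r
                    crQUEUE_SEND( xHandle, xExampleQueue, &ulValueToPost, 10, &xResult );\r
                    if( xResult == pdPASS )\r
                    {\r
                        ulValueToPost++;\r
                    }\r
                }\r
                crEND();\r
            }\r
        */\r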
2420 \r
2421 #endif /* configUSE_CO_ROUTINES */\r
2422 /*-----------------------------------------------------------*/\r
2423 \r
2424 #if ( configUSE_CO_ROUTINES == 1 )\r
2425 \r
2426         BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )\r
2427         {\r
2428         BaseType_t xReturn;\r
2429         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2430 \r
2431                 /* If the queue is already empty we may have to block.  A critical section\r
2432                 is required to prevent an interrupt adding something to the queue\r
2433                 between the check to see if the queue is empty and blocking on the queue. */\r
2434                 portDISABLE_INTERRUPTS();\r
2435                 {\r
2436                         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
2437                         {\r
2438                                 /* There are no messages in the queue, do we want to block or just\r
2439                                 leave with nothing? */\r
2440                                 if( xTicksToWait > ( TickType_t ) 0 )\r
2441                                 {\r
2442                                         /* As this is a co-routine we cannot block directly, but return\r
2443                                         indicating that we need to block. */\r
2444                                         vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );\r
2445                                         portENABLE_INTERRUPTS();\r
2446                                         return errQUEUE_BLOCKED;\r
2447                                 }\r
2448                                 else\r
2449                                 {\r
2450                                         portENABLE_INTERRUPTS();\r
2451                                         return errQUEUE_EMPTY;\r
2452                                 }\r
2453                         }\r
2454                         else\r
2455                         {\r
2456                                 mtCOVERAGE_TEST_MARKER();\r
2457                         }\r
2458                 }\r
2459                 portENABLE_INTERRUPTS();\r
2460 \r
2461                 portDISABLE_INTERRUPTS();\r
2462                 {\r
2463                         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
2464                         {\r
2465                                 /* Data is available from the queue. */\r
2466                                 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
2467                                 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )\r
2468                                 {\r
2469                                         pxQueue->u.pcReadFrom = pxQueue->pcHead;\r
2470                                 }\r
2471                                 else\r
2472                                 {\r
2473                                         mtCOVERAGE_TEST_MARKER();\r
2474                                 }\r
2475                                 --( pxQueue->uxMessagesWaiting );\r
2476                                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );\r
2477 \r
2478                                 xReturn = pdPASS;\r
2479 \r
2480                                 /* Were any co-routines waiting for space to become available? */\r
2481                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
2482                                 {\r
2483                                         /* In this instance the co-routine could be placed directly\r
2484                                         into the ready list as we are within a critical section.\r
2485                                         Instead the same pending ready list mechanism is used as if\r
2486                                         the event were caused from within an interrupt. */\r
2487                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
2488                                         {\r
2489                                                 xReturn = errQUEUE_YIELD;\r
2490                                         }\r
2491                                         else\r
2492                                         {\r
2493                                                 mtCOVERAGE_TEST_MARKER();\r
2494                                         }\r
2495                                 }\r
2496                                 else\r
2497                                 {\r
2498                                         mtCOVERAGE_TEST_MARKER();\r
2499                                 }\r
2500                         }\r
2501                         else\r
2502                         {\r
2503                                 xReturn = pdFAIL;\r
2504                         }\r
2505                 }\r
2506                 portENABLE_INTERRUPTS();\r
2507 \r
2508                 return xReturn;\r
2509         }\r
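\r
        /* Usage sketch (illustrative only): the receiving side mirrors the\r
        send side, using the crQUEUE_RECEIVE() macro from croutine.h.\r
        xExampleQueue is again an assumed handle and vProcessValue() a\r
        hypothetical application function.\r
\r
            void vRxCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )\r
            {\r
            static uint32_t ulReceivedValue;\r
            static BaseType_t xResult;\r
\r
                crSTART( xHandle );\r
                for( ;; )\r
                {\r
                    crQUEUE_RECEIVE( xHandle, xExampleQueue, &ulReceivedValue, 10, &xResult );\r
                    if( xResult == pdPASS )\r
                    {\r
                        vProcessValue( ulReceivedValue );\r
                    }\r
                }\r
                crEND();\r
            }\r
        */\r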
2510 \r
2511 #endif /* configUSE_CO_ROUTINES */\r
2512 /*-----------------------------------------------------------*/\r
2513 \r
2514 #if ( configUSE_CO_ROUTINES == 1 )\r
2515 \r
2516         BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )\r
2517         {\r
2518         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2519 \r
2520                 /* Cannot block within an ISR so if there is no space on the queue then\r
2521                 exit without doing anything. */\r
2522                 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
2523                 {\r
2524                         prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );\r
2525 \r
2526                         /* We only want to wake one co-routine per ISR, so check that a\r
2527                         co-routine has not already been woken. */\r
2528                         if( xCoRoutinePreviouslyWoken == pdFALSE )\r
2529                         {\r
2530                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2531                                 {\r
2532                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2533                                         {\r
2534                                                 return pdTRUE;\r
2535                                         }\r
2536                                         else\r
2537                                         {\r
2538                                                 mtCOVERAGE_TEST_MARKER();\r
2539                                         }\r
2540                                 }\r
2541                                 else\r
2542                                 {\r
2543                                         mtCOVERAGE_TEST_MARKER();\r
2544                                 }\r
2545                         }\r
2546                         else\r
2547                         {\r
2548                                 mtCOVERAGE_TEST_MARKER();\r
2549                         }\r
2550                 }\r
2551                 else\r
2552                 {\r
2553                         mtCOVERAGE_TEST_MARKER();\r
2554                 }\r
2555 \r
2556                 return xCoRoutinePreviouslyWoken;\r
2557         }\r
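\r
        /* Usage sketch (illustrative only): from an ISR this function is\r
        reached through the crQUEUE_SEND_FROM_ISR() macro.  The woken flag\r
        is threaded through successive posts so that at most one co-routine\r
        is woken per interrupt, as the check above enforces.  xExampleQueue\r
        and cRxedChar are assumptions for the example.\r
\r
            BaseType_t xCoRoutineWoken = pdFALSE;\r
\r
            xCoRoutineWoken = crQUEUE_SEND_FROM_ISR( xExampleQueue, &cRxedChar, xCoRoutineWoken );\r
        */\r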
2558 \r
2559 #endif /* configUSE_CO_ROUTINES */\r
2560 /*-----------------------------------------------------------*/\r
2561 \r
2562 #if ( configUSE_CO_ROUTINES == 1 )\r
2563 \r
2564         BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )\r
2565         {\r
2566         BaseType_t xReturn;\r
2567         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2568 \r
2569                 /* We cannot block from an ISR, so check there is data available. If\r
2570                 not then just leave without doing anything. */\r
2571                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
2572                 {\r
2573                         /* Copy the data from the queue. */\r
2574                         pxQueue->u.pcReadFrom += pxQueue->uxItemSize;\r
2575                         if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )\r
2576                         {\r
2577                                 pxQueue->u.pcReadFrom = pxQueue->pcHead;\r
2578                         }\r
2579                         else\r
2580                         {\r
2581                                 mtCOVERAGE_TEST_MARKER();\r
2582                         }\r
2583                         --( pxQueue->uxMessagesWaiting );\r
2584                         ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );\r
2585 \r
2586                         if( ( *pxCoRoutineWoken ) == pdFALSE )\r
2587                         {\r
2588                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
2589                                 {\r
2590                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
2591                                         {\r
2592                                                 *pxCoRoutineWoken = pdTRUE;\r
2593                                         }\r
2594                                         else\r
2595                                         {\r
2596                                                 mtCOVERAGE_TEST_MARKER();\r
2597                                         }\r
2598                                 }\r
2599                                 else\r
2600                                 {\r
2601                                         mtCOVERAGE_TEST_MARKER();\r
2602                                 }\r
2603                         }\r
2604                         else\r
2605                         {\r
2606                                 mtCOVERAGE_TEST_MARKER();\r
2607                         }\r
2608 \r
2609                         xReturn = pdPASS;\r
2610                 }\r
2611                 else\r
2612                 {\r
2613                         xReturn = pdFAIL;\r
2614                 }\r
2615 \r
2616                 return xReturn;\r
2617         }\r
2618 \r
2619 #endif /* configUSE_CO_ROUTINES */\r
2620 /*-----------------------------------------------------------*/\r
2621 \r
2622 #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
2623 \r
2624         void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
2625         {\r
2626         UBaseType_t ux;\r
2627 \r
2628                 /* See if there is an empty space in the registry.  A NULL name denotes\r
2629                 a free slot. */\r
2630                 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )\r
2631                 {\r
2632                         if( xQueueRegistry[ ux ].pcQueueName == NULL )\r
2633                         {\r
2634                                 /* Store the information on this queue. */\r
2635                                 xQueueRegistry[ ux ].pcQueueName = pcQueueName;\r
2636                                 xQueueRegistry[ ux ].xHandle = xQueue;\r
2637 \r
2638                                 traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );\r
2639                                 break;\r
2640                         }\r
2641                         else\r
2642                         {\r
2643                                 mtCOVERAGE_TEST_MARKER();\r
2644                         }\r
2645                 }\r
2646         }\r
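\r
        /* Usage sketch (illustrative only, not part of the kernel): the\r
        registry is purely an aid for kernel-aware debuggers.  Note the\r
        assignment above stores the name pointer itself, not a copy, so the\r
        string must stay valid for as long as the queue remains registered -\r
        a string literal is the usual choice.\r
\r
            vQueueAddToRegistry( xExampleQueue, "ExampleQueue" );\r
        */\r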
2647 \r
2648 #endif /* configQUEUE_REGISTRY_SIZE */\r
2649 /*-----------------------------------------------------------*/\r
2650 \r
2651 #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
2652 \r
2653         const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
2654         {\r
2655         UBaseType_t ux;\r
2656         const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
2657 \r
2658                 /* Note there is nothing here to protect against another task adding or\r
2659                 removing entries from the registry while it is being searched. */\r
2660                 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )\r
2661                 {\r
2662                         if( xQueueRegistry[ ux ].xHandle == xQueue )\r
2663                         {\r
2664                                 pcReturn = xQueueRegistry[ ux ].pcQueueName;\r
2665                                 break;\r
2666                         }\r
2667                         else\r
2668                         {\r
2669                                 mtCOVERAGE_TEST_MARKER();\r
2670                         }\r
2671                 }\r
2672 \r
2673                 return pcReturn;\r
2674         } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */\r
2675 \r
2676 #endif /* configQUEUE_REGISTRY_SIZE */\r
2677 /*-----------------------------------------------------------*/\r
2678 \r
2679 #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
2680 \r
2681         void vQueueUnregisterQueue( QueueHandle_t xQueue )\r
2682         {\r
2683         UBaseType_t ux;\r
2684 \r
2685                 /* See if the handle of the queue being unregistered is actually in the\r
2686                 registry. */\r
2687                 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )\r
2688                 {\r
2689                         if( xQueueRegistry[ ux ].xHandle == xQueue )\r
2690                         {\r
2691                                 /* Set the name to NULL to show that this slot is free again. */\r
2692                                 xQueueRegistry[ ux ].pcQueueName = NULL;\r
2693 \r
2694                                 /* Set the handle to NULL to ensure the same queue handle cannot\r
2695                                 appear in the registry twice if it is added, removed, then\r
2696                                 added again. */\r
2697                                 xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;\r
2698                                 break;\r
2699                         }\r
2700                         else\r
2701                         {\r
2702                                 mtCOVERAGE_TEST_MARKER();\r
2703                         }\r
2704                 }\r
2705 \r
2706         } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2707 \r
2708 #endif /* configQUEUE_REGISTRY_SIZE */\r
2709 /*-----------------------------------------------------------*/\r
2710 \r
2711 #if ( configUSE_TIMERS == 1 )\r
2712 \r
2713         void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )\r
2714         {\r
2715         Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
2716 \r
2717                 /* This function should not be called by application code hence the\r
2718                 'Restricted' in its name.  It is not part of the public API.  It is\r
2719                 designed for use by kernel code, and has special calling requirements.\r
2720                 It can result in vListInsert() being called on a list that can only\r
2721                 possibly ever have one item in it, so the insertion will be fast, but even\r
2722                 so it should be called with the scheduler locked and not from a critical\r
2723                 section. */\r
2724 \r
2725                 /* Only do anything if there are no messages in the queue.  This function\r
2726                 will not actually cause the task to block, just place it on a blocked\r
2727                 list.  It will not block until the scheduler is unlocked - at which\r
2728                 time a yield will be performed.  If an item is added to the queue while\r
2729                 the queue is locked, and the calling task blocks on the queue, then the\r
2730                 calling task will be immediately unblocked when the queue is unlocked. */\r
2731                 prvLockQueue( pxQueue );\r
2732                 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )\r
2733                 {\r
2734                         /* There is nothing in the queue, block for the specified period. */\r
2735                         vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );\r
2736                 }\r
2737                 else\r
2738                 {\r
2739                         mtCOVERAGE_TEST_MARKER();\r
2740                 }\r
2741                 prvUnlockQueue( pxQueue );\r
2742         }\r
2743 \r
2744 #endif /* configUSE_TIMERS */\r
2745 /*-----------------------------------------------------------*/\r
2746 \r
2747 #if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )\r
2748 \r
2749         QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )\r
2750         {\r
2751         QueueSetHandle_t pxQueue;\r
2752 \r
2753                 pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );\r
2754 \r
2755                 return pxQueue;\r
2756         }\r
2757 \r
2758 #endif /* configUSE_QUEUE_SETS */\r
2759 /*-----------------------------------------------------------*/\r
2760 \r
2761 #if ( configUSE_QUEUE_SETS == 1 )\r
2762 \r
2763         BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )\r
2764         {\r
2765         BaseType_t xReturn;\r
2766 \r
2767                 taskENTER_CRITICAL();\r
2768                 {\r
2769                         if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )\r
2770                         {\r
2771                                 /* Cannot add a queue/semaphore to more than one queue set. */\r
2772                                 xReturn = pdFAIL;\r
2773                         }\r
2774                         else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )\r
2775                         {\r
2776                                 /* Cannot add a queue/semaphore to a queue set if there are already\r
2777                                 items in the queue/semaphore. */\r
2778                                 xReturn = pdFAIL;\r
2779                         }\r
2780                         else\r
2781                         {\r
2782                                 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;\r
2783                                 xReturn = pdPASS;\r
2784                         }\r
2785                 }\r
2786                 taskEXIT_CRITICAL();\r
2787 \r
2788                 return xReturn;\r
2789         }\r
2790 \r
2791 #endif /* configUSE_QUEUE_SETS */\r
2792 /*-----------------------------------------------------------*/\r
2793 \r
2794 #if ( configUSE_QUEUE_SETS == 1 )\r
2795 \r
2796         BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )\r
2797         {\r
2798         BaseType_t xReturn;\r
2799         Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;\r
2800 \r
2801                 if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )\r
2802                 {\r
2803                         /* The queue was not a member of the set. */\r
2804                         xReturn = pdFAIL;\r
2805                 }\r
2806                 else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )\r
2807                 {\r
2808                         /* It is dangerous to remove a queue from a set when the queue is\r
2809                         not empty because the queue set will still hold pending events for\r
2810                         the queue. */\r
2811                         xReturn = pdFAIL;\r
2812                 }\r
2813                 else\r
2814                 {\r
2815                         taskENTER_CRITICAL();\r
2816                         {\r
2817                                 /* The queue is no longer contained in the set. */\r
2818                                 pxQueueOrSemaphore->pxQueueSetContainer = NULL;\r
2819                         }\r
2820                         taskEXIT_CRITICAL();\r
2821                         xReturn = pdPASS;\r
2822                 }\r
2823 \r
2824                 return xReturn;\r
2825         } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */\r
2826 \r
2827 #endif /* configUSE_QUEUE_SETS */\r
2828 /*-----------------------------------------------------------*/\r
2829 \r
2830 #if ( configUSE_QUEUE_SETS == 1 )\r
2831 \r
2832         QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )\r
2833         {\r
2834         QueueSetMemberHandle_t xReturn = NULL;\r
2835 \r
2836                 ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */\r
2837                 return xReturn;\r
2838         }\r
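\r
        /* Usage sketch (illustrative only, not part of the kernel): a set\r
        must be created large enough to hold one event for every space in\r
        every member, and members must be empty when added (see\r
        xQueueAddToSet() above).  xQueue1 and xQueue2 are assumed handles,\r
        each created with a length of 10, and ulItem an assumed uint32_t.\r
\r
            QueueSetHandle_t xSet;\r
            QueueSetMemberHandle_t xActivated;\r
\r
            xSet = xQueueCreateSet( 10 + 10 );\r
            ( void ) xQueueAddToSet( xQueue1, xSet );\r
            ( void ) xQueueAddToSet( xQueue2, xSet );\r
\r
            xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );\r
            if( xActivated == ( QueueSetMemberHandle_t ) xQueue1 )\r
            {\r
                ( void ) xQueueReceive( ( QueueHandle_t ) xActivated, &ulItem, 0 );\r
            }\r
\r
        A zero block time is used on the receive because the select\r
        guarantees an item is pending on the returned member. */\r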
2839 \r
2840 #endif /* configUSE_QUEUE_SETS */\r
2841 /*-----------------------------------------------------------*/\r
2842 \r
2843 #if ( configUSE_QUEUE_SETS == 1 )\r
2844 \r
2845         QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )\r
2846         {\r
2847         QueueSetMemberHandle_t xReturn = NULL;\r
2848 \r
2849                 ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */\r
2850                 return xReturn;\r
2851         }\r
2852 \r
2853 #endif /* configUSE_QUEUE_SETS */\r
2854 /*-----------------------------------------------------------*/\r
2855 \r
2856 #if ( configUSE_QUEUE_SETS == 1 )\r
2857 \r
2858         static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )\r
2859         {\r
2860         Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;\r
2861         BaseType_t xReturn = pdFALSE;\r
2862 \r
2863                 /* This function must be called from a critical section. */\r
2864 \r
2865                 configASSERT( pxQueueSetContainer );\r
2866                 configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );\r
2867 \r
2868                 if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )\r
2869                 {\r
2870                         const int8_t cTxLock = pxQueueSetContainer->cTxLock;\r
2871 \r
2872                         traceQUEUE_SEND( pxQueueSetContainer );\r
2873 \r
2874                         /* The data copied is the handle of the queue that contains data. */\r
2875                         xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );\r
2876 \r
2877                         if( cTxLock == queueUNLOCKED )\r
2878                         {\r
2879                                 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )\r
2880                                 {\r
2881                                         if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )\r
2882                                         {\r
2883                                                 /* The task waiting has a higher priority. */\r
2884                                                 xReturn = pdTRUE;\r
2885                                         }\r
2886                                         else\r
2887                                         {\r
2888                                                 mtCOVERAGE_TEST_MARKER();\r
2889                                         }\r
2890                                 }\r
2891                                 else\r
2892                                 {\r
2893                                         mtCOVERAGE_TEST_MARKER();\r
2894                                 }\r
2895                         }\r
2896                         else\r
2897                         {\r
2898                                 pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );\r
2899                         }\r
2900                 }\r
2901                 else\r
2902                 {\r
2903                         mtCOVERAGE_TEST_MARKER();\r
2904                 }\r
2905 \r
2906                 return xReturn;\r
2907         }\r
2908 \r
2909 #endif /* configUSE_QUEUE_SETS */\r
2910 \r
2911 \r
2912 \r
2913 \r
2914 \r
2915 \r
2916 \r
2917 \r
2918 \r
2919 \r
2920 \r
2921 \r