/*
	FreeRTOS V8.2.3 - Copyright (C) 2015 Real Time Engineers Ltd.
	All rights reserved

	VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

	This file is part of the FreeRTOS distribution.

	FreeRTOS is free software; you can redistribute it and/or modify it under
	the terms of the GNU General Public License (version 2) as published by the
	Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.

	***************************************************************************
	>>!   NOTE: The modification to the GPL is included to allow you to     !<<
	>>!   distribute a combined work that includes FreeRTOS without being   !<<
	>>!   obliged to provide the source code for proprietary components     !<<
	>>!   outside of the FreeRTOS kernel.                                   !<<
	***************************************************************************

	FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
	WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
	FOR A PARTICULAR PURPOSE.  Full license text is available on the following
	link: http://www.freertos.org/a00114.html

	***************************************************************************
	*                                                                         *
	*    FreeRTOS provides completely free yet professionally developed,      *
	*    robust, strictly quality controlled, supported, and cross            *
	*    platform software that is more than just the market leader, it       *
	*    is the industry's de facto standard.                                 *
	*                                                                         *
	*    Help yourself get started quickly while simultaneously helping       *
	*    to support the FreeRTOS project by purchasing a FreeRTOS             *
	*    tutorial book, reference manual, or both:                            *
	*    http://www.FreeRTOS.org/Documentation                                *
	*                                                                         *
	***************************************************************************

	http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
	the FAQ page "My application does not run, what could be wrong?".  Have you
	defined configASSERT()?

	http://www.FreeRTOS.org/support - In return for receiving this top quality
	embedded software for free we request you assist our global community by
	participating in the support forum.

	http://www.FreeRTOS.org/training - Investing in training allows your team to
	be as productive as possible as early as possible.  Now you can receive
	FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
	Ltd, and the world's leading authority on the world's leading RTOS.

	http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
	including FreeRTOS+Trace - an indispensable productivity tool, a DOS
	compatible FAT file system, and our tiny thread aware UDP/IP stack.

	http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
	Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.

	http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
	Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
	licenses offer ticketed support, indemnification and commercial middleware.

	http://www.SafeRTOS.com - High Integrity Systems also provide a safety
	engineered and independently SIL3 certified version for use in safety and
	mission critical applications that require provable dependability.

	1 tab == 4 spaces!
*/

#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
	#include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */

/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED					( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( TickType_t ) 0U )

/* Bits that can be set in xQUEUE->ucStaticAllocationFlags to indicate that the
queue storage area and queue structure were statically allocated respectively.
When these are statically allocated they won't be freed if the queue gets
deleted. */
#define queueSTATICALLY_ALLOCATED_STORAGE		( ( uint8_t ) 0x01 )
#define queueSTATICALLY_ALLOCATED_QUEUE_STRUCT	( ( uint8_t ) 0x02 )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
	int8_t *pcHead;					/*< Points to the beginning of the queue storage area. */
	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this byte is used as a marker. */
	int8_t *pcWriteTo;				/*< Points to the next free place in the storage area. */

	union							/* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
	{
		int8_t *pcReadFrom;			/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
		UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
	} u;

	List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
	List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

	volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
	UBaseType_t uxItemSize;			/*< The size of each item that the queue will hold. */

	volatile BaseType_t xRxLock;	/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
	volatile BaseType_t xTxLock;	/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

	#if ( configUSE_QUEUE_SETS == 1 )
		struct QueueDefinition *pxQueueSetContainer;
	#endif

	#if ( configUSE_TRACE_FACILITY == 1 )
		UBaseType_t uxQueueNumber;
		uint8_t ucQueueType;
	#endif

	#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
		uint8_t ucStaticAllocationFlags;
	#endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} xQueueRegistryItem;

	/* The old xQueueRegistryItem name is maintained above then typedefed to the
	new xQueueRegistryItem name below to enable the use of older kernel aware
	debuggers. */
	typedef xQueueRegistryItem QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */

/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

/*
 * A queue requires two blocks of memory; a structure to hold the queue state
 * and a storage area to hold the items in the queue.  The memory is assigned
 * by prvAllocateQueueMemory().  If ppucQueueStorage is NULL then the queue
 * storage will be allocated dynamically, otherwise the buffer passed in
 * ppucQueueStorage will be used.  If pxStaticQueue is NULL then the queue
 * structure will be allocated dynamically, otherwise the buffer pointed to by
 * pxStaticQueue will be used.
 */
static Queue_t *prvAllocateQueueMemory( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t **ppucQueueStorage, StaticQueue_t *pxStaticQueue );

#if ( configUSE_QUEUE_SETS == 1 )
	/*
	 * Checks to see if a queue is a member of a queue set, and if so, notifies
	 * the queue set that the queue contains data.
	 */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL();									\
	{														\
		if( ( pxQueue )->xRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->xTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
		pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
		pxQueue->pcWriteTo = pxQueue->pcHead;
		pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
		pxQueue->xRxLock = queueUNLOCKED;
		pxQueue->xTxLock = queueUNLOCKED;

		if( xNewQueue == pdFALSE )
		{
			/* If there are tasks blocked waiting to read from the queue, then
			the tasks will remain blocked as after this function exits the queue
			will still be empty.  If there are tasks blocked waiting to write to
			the queue, then one should be unblocked as after this function exits
			it will be possible to write to it. */
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
				{
					queueYIELD_IF_USING_PREEMPTION();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			/* Ensure the event queues start in the correct state. */
			vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
		}
	}
	taskEXIT_CRITICAL();

	/* A value is returned for calling semantic consistency with previous
	versions. */
	return pdPASS;
}
/*-----------------------------------------------------------*/
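
/* Illustrative usage sketch (not part of the kernel source): application code
normally reaches this function through the xQueueReset() macro in queue.h,
which passes pdFALSE for xNewQueue.  Assuming a hypothetical, previously
created handle xExampleQueue, emptying the queue looks like:

	( void ) xQueueReset( xExampleQueue );

As described above, a task blocked waiting to send to the queue may be
unblocked by the reset, because the queue becomes writable again. */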
static Queue_t *prvAllocateQueueMemory( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t **ppucQueueStorage, StaticQueue_t *pxStaticQueue )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;

	configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

	#if( ( configASSERT_DEFINED == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
	{
		/* Sanity check that the size of the structure used to declare a
		variable of type StaticQueue_t or StaticSemaphore_t equals the size of
		the real queue and semaphore structures. */
		volatile size_t xSize = sizeof( StaticQueue_t );
		configASSERT( xSize == sizeof( Queue_t ) );
	}
	#endif /* configASSERT_DEFINED */

	if( uxItemSize == ( UBaseType_t ) 0 )
	{
		/* There is not going to be a queue storage area. */
		xQueueSizeInBytes = ( size_t ) 0;
	}
	else
	{
		/* Allocate enough space to hold the maximum number of items that can be
		in the queue at any time. */
		xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
	}

	#if( configSUPPORT_STATIC_ALLOCATION == 0 )
	{
		/* Allocate the new queue structure and storage area. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

		if( pxNewQueue != NULL )
		{
			/* Jump past the queue structure to find the location of the queue
			storage area. */
			*ppucQueueStorage = ( ( uint8_t * ) pxNewQueue ) + sizeof( Queue_t );
		}

		/* The pxStaticQueue parameter is not used.  Remove compiler warnings. */
		( void ) pxStaticQueue;
	}
	#else
	{
		if( pxStaticQueue == NULL )
		{
			/* A statically allocated queue was not passed in, so create one
			dynamically. */
			pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
			pxNewQueue->ucStaticAllocationFlags = 0;
		}
		else
		{
			/* The address of a statically allocated queue was passed in, use
			it and note that the queue was not dynamically allocated so there is
			no attempt to free it again should the queue be deleted. */
			pxNewQueue = ( Queue_t * ) pxStaticQueue;
			pxNewQueue->ucStaticAllocationFlags = queueSTATICALLY_ALLOCATED_QUEUE_STRUCT;
		}

		if( pxNewQueue != NULL )
		{
			if( ( *ppucQueueStorage == NULL ) && ( xQueueSizeInBytes > 0 ) )
			{
				/* A statically allocated queue storage area was not passed in,
				so allocate the queue storage area dynamically. */
				*ppucQueueStorage = ( uint8_t * ) pvPortMalloc( xQueueSizeInBytes );

				if( *ppucQueueStorage == NULL )
				{
					/* The queue storage area could not be created, so free the
					queue structure also. */
					if( ( pxNewQueue->ucStaticAllocationFlags & queueSTATICALLY_ALLOCATED_QUEUE_STRUCT ) == 0 )
					{
						vPortFree( ( void * ) pxNewQueue );
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					pxNewQueue = NULL;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Note the fact that either the queue storage area was passed
				into this function, or the size requirement for the queue
				storage area was zero - either way no attempt should be made to
				free the queue storage area if the queue is deleted. */
				pxNewQueue->ucStaticAllocationFlags |= queueSTATICALLY_ALLOCATED_STORAGE;
			}
		}
	}
	#endif /* configSUPPORT_STATIC_ALLOCATION */

	return pxNewQueue;
}
/*-----------------------------------------------------------*/

QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;

	/* Remove compiler warnings about unused parameters should
	configUSE_TRACE_FACILITY not be set to 1. */
	( void ) ucQueueType;

	/* A queue requires a queue structure and a queue storage area.  These may
	be allocated statically or dynamically, depending on the parameter
	values. */
	pxNewQueue = prvAllocateQueueMemory( uxQueueLength, uxItemSize, &pucQueueStorage, pxStaticQueue );

	if( pxNewQueue != NULL )
	{
		if( uxItemSize == ( UBaseType_t ) 0 )
		{
			/* No RAM was allocated for the queue storage area, but pcHead
			cannot be set to NULL because NULL is used as a key to say the queue
			is used as a mutex.  Therefore just set pcHead to point to the queue
			as a benign value that is known to be within the memory map. */
			pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
		}
		else
		{
			/* Set the head to the start of the queue storage area. */
			pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
		}

		/* Initialise the queue members as described where the queue type is
		defined. */
		pxNewQueue->uxLength = uxQueueLength;
		pxNewQueue->uxItemSize = uxItemSize;
		( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

		#if ( configUSE_TRACE_FACILITY == 1 )
		{
			pxNewQueue->ucQueueType = ucQueueType;
		}
		#endif /* configUSE_TRACE_FACILITY */

		#if( configUSE_QUEUE_SETS == 1 )
		{
			pxNewQueue->pxQueueSetContainer = NULL;
		}
		#endif /* configUSE_QUEUE_SETS */

		traceQUEUE_CREATE( pxNewQueue );
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}

	configASSERT( pxNewQueue );

	return ( QueueHandle_t ) pxNewQueue;
}
/*-----------------------------------------------------------*/
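
/* Illustrative usage sketch (not part of the kernel source): application code
normally creates a queue with the xQueueCreate() macro from queue.h rather
than by calling xQueueGenericCreate() directly.  A hypothetical queue of ten
uint32_t items:

	QueueHandle_t xExampleQueue;

	xExampleQueue = xQueueCreate( 10, sizeof( uint32_t ) );
	if( xExampleQueue == NULL )
	{
		... the queue structure or storage area could not be allocated ...
	}
*/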
#if ( configUSE_MUTEXES == 1 )

	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		/* Allocate the new queue structure. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
		if( pxNewQueue != NULL )
		{
			/* Information required for priority inheritance. */
			pxNewQueue->pxMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* When a queue is used as a mutex no data is actually copied into
			or out of the queue. */
			pxNewQueue->pcWriteTo = NULL;
			pxNewQueue->u.pcReadFrom = NULL;

			/* Each mutex has a length of 1 (like a binary semaphore) and
			an item size of 0 as nothing is actually copied into or out
			of the mutex. */
			pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
			pxNewQueue->uxLength = ( UBaseType_t ) 1U;
			pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
			pxNewQueue->xRxLock = queueUNLOCKED;
			pxNewQueue->xTxLock = queueUNLOCKED;

			#if ( configUSE_TRACE_FACILITY == 1 )
			{
				pxNewQueue->ucQueueType = ucQueueType;
			}
			#endif

			#if ( configUSE_QUEUE_SETS == 1 )
			{
				pxNewQueue->pxQueueSetContainer = NULL;
			}
			#endif

			/* Ensure the event queues start with the correct state. */
			vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state. */
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			traceCREATE_MUTEX_FAILED();
		}

		return pxNewQueue;
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
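
/* Illustrative usage sketch (not part of the kernel source): mutexes are
normally created and used through the semphr.h wrappers, which end up calling
into this file.  For example:

	SemaphoreHandle_t xExampleMutex = xSemaphoreCreateMutex();

	if( xSemaphoreTake( xExampleMutex, pdMS_TO_TICKS( 10 ) ) == pdPASS )
	{
		... access the protected resource ...
		xSemaphoreGive( xExampleMutex );
	}

pdMS_TO_TICKS() is assumed to be available in this FreeRTOS version; a raw
tick count can be used instead. */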
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
	{
	void *pxReturn;

		/* This function is called by xSemaphoreGetMutexHolder(), and should not
		be called directly.  Note:  This is a good way of determining if the
		calling task is the mutex holder, but not a good way of determining the
		identity of the mutex holder, as the holder may change between the
		following critical section exiting and the function returning. */
		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
			}
			else
			{
				pxReturn = NULL;
			}
		}
		taskEXIT_CRITICAL();

		return pxReturn;
	} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
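
/* Illustrative usage sketch (not part of the kernel source): as noted above,
the returned value is only reliable when used to ask "does the calling task
hold the mutex?".  Assuming INCLUDE_xSemaphoreGetMutexHolder and
INCLUDE_xTaskGetCurrentTaskHandle are both 1 in FreeRTOSConfig.h:

	if( xSemaphoreGetMutexHolder( xExampleMutex ) == xTaskGetCurrentTaskHandle() )
	{
		... the calling task is the holder ...
	}
*/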
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then pxMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		pxMutexHolder can never coincidentally equal the tasks handle, and as
		this is the only condition we are interested in it does not matter if
		pxMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the pxMutexHolder variable. */
		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.uxRecursiveCallCount )--;

			/* Have we unwound the call count? */
			if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			/* The mutex cannot be given because the calling task is not the
			holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* Comments regarding mutual exclusion as per those within
		xQueueGiveMutexRecursive(). */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
		{
			( pxMutex->u.uxRecursiveCallCount )++;
			xReturn = pdPASS;
		}
		else
		{
			xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

			/* pdPASS will only be returned if the mutex was successfully
			obtained.  The calling task may have entered the Blocked state
			before reaching here. */
			if( xReturn == pdPASS )
			{
				( pxMutex->u.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
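
/* Illustrative usage sketch (not part of the kernel source): recursive mutexes
are normally used through the semphr.h wrappers.  Each successful 'take' must
be balanced by a 'give' before the mutex becomes available to other tasks:

	SemaphoreHandle_t xExampleRecursiveMutex = xSemaphoreCreateRecursiveMutex();

	if( xSemaphoreTakeRecursive( xExampleRecursiveMutex, portMAX_DELAY ) == pdPASS )
	{
		... possibly take the same mutex again from a nested function ...
		xSemaphoreGiveRecursive( xExampleRecursiveMutex );
	}
*/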
#if ( configUSE_COUNTING_SEMAPHORES == 1 )

	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, NULL, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		configASSERT( xHandle );
		return xHandle;
	}

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/
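
/* Illustrative usage sketch (not part of the kernel source): counting
semaphores are normally created through the semphr.h wrapper, which calls
xQueueCreateCountingSemaphore().  A hypothetical semaphore that can count up
to five events, starting empty:

	SemaphoreHandle_t xExampleCountingSemaphore;

	xExampleCountingSemaphore = xSemaphoreCreateCounting( 5, 0 );
	configASSERT( xExampleCountingSemaphore );
*/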
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there room on the queue now?  The running task must be the
			highest priority task wanting to access the queue.  If the head item
			in the queue is to be overwritten then it does not matter if the
			queue is full. */
			if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
			{
				traceQUEUE_SEND( pxQueue );
				xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock.  A context switch is required. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* If there was a task waiting for data to arrive on the
						queue then unblock it now. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
							{
								/* The unblocked task has a priority higher than
								our own so yield immediately.  Yes it is ok to
								do this from within the critical section - the
								kernel takes care of that. */
								queueYIELD_IF_USING_PREEMPTION();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else if( xYieldRequired != pdFALSE )
						{
							/* This path is a special case that will only get
							executed if the task was holding multiple mutexes
							and the mutexes were given back in an order that is
							different to that in which they were taken. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately.  Yes it is ok to do
							this from within the critical section - the kernel
							takes care of that. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else if( xYieldRequired != pdFALSE )
					{
						/* This path is a special case that will only get
						executed if the task was holding multiple mutexes and
						the mutexes were given back in an order that is
						different to that in which they were taken. */
						queueYIELD_IF_USING_PREEMPTION();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */

				taskEXIT_CRITICAL();

				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was full and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();

					/* Return to the original privilege level before exiting
					the function. */
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was full and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

				/* Unlocking the queue means queue events can affect the
				event list.  It is possible that interrupts occurring now
				remove this task from the event list again - but as the
				scheduler is suspended the task will go onto the pending
				ready list instead of the actual ready list. */
				prvUnlockQueue( pxQueue );

				/* Resuming the scheduler will move tasks from the pending
				ready list into the ready list - so it is feasible that this
				task is already in a ready list before it yields - in which
				case the yield will not cause a context switch unless there
				is also a higher priority task in the pending ready list. */
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The timeout has expired. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			traceQUEUE_SEND_FAILED( pxQueue );
			return errQUEUE_FULL;
		}
	}
}
/*-----------------------------------------------------------*/
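
/* Illustrative usage sketch (not part of the kernel source): tasks normally
post through the xQueueSend(), xQueueSendToBack(), xQueueSendToFront() or
xQueueOverwrite() macros in queue.h, all of which resolve to
xQueueGenericSend() with a different xCopyPosition.  For example, blocking for
at most ten ticks while waiting for space on the hypothetical xExampleQueue:

	uint32_t ulValueToSend = 10UL;

	if( xQueueSend( xExampleQueue, &ulValueToSend, ( TickType_t ) 10 ) != pdPASS )
	{
		... the item could not be posted within ten ticks ...
	}
*/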
#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				/* Is there room on the queue now?  To be running we must be
				the highest priority task wanting to access the queue. */
				if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
				{
					traceQUEUE_SEND( pxQueue );
					prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately. */
							portYIELD_WITHIN_API();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						taskEXIT_CRITICAL();
						return errQUEUE_FULL;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueFull( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_SEND( pxQueue );
						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					taskEXIT_CRITICAL();
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
			}
			taskEXIT_CRITICAL();
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	int8_t *pcOriginalReadPosition;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
				{
					/* Remember our read position in case we are just peeking. */
					pcOriginalReadPosition = pxQueue->u.pcReadFrom;

					prvCopyDataFromQueue( pxQueue, pvBuffer );

					if( xJustPeeking == pdFALSE )
					{
						traceQUEUE_RECEIVE( pxQueue );

						/* Data is actually being removed (not just peeked). */
						--( pxQueue->uxMessagesWaiting );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Record the information required to implement
								priority inheritance should it become necessary. */
								pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
							{
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
					}
					else
					{
						traceQUEUE_PEEK( pxQueue );

						/* The data is not being removed, so reset our read
						pointer. */
						pxQueue->u.pcReadFrom = pcOriginalReadPosition;

						/* The data is being left in the queue, so see if there are
						any other tasks waiting for the data. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							/* Tasks that are removed from the event list will get added to
							the pending ready list as the scheduler is still suspended. */
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority than this task. */
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						taskEXIT_CRITICAL();
						traceQUEUE_RECEIVE_FAILED( pxQueue );
						return errQUEUE_EMPTY;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								taskENTER_CRITICAL();
								{
									vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
								}
								taskEXIT_CRITICAL();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
			}
			taskEXIT_CRITICAL();
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	/* Similar to xQueueGenericSend, except without blocking if there is no room
	in the queue.  Also don't directly wake a task that was blocked on a queue
	read, instead return a flag to say whether a context switch is required or
	not (i.e. has a task with a higher priority than us been woken by this
	post). */
	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
		{
			traceQUEUE_SEND_FROM_ISR( pxQueue );

			/* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
			semaphore or mutex.  That means prvCopyDataToQueue() cannot result
			in a task disinheriting a priority and prvCopyDataToQueue() can be
			called here even though the disinherit function does not check if
			the scheduler is suspended before accessing the ready lists. */
			( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

			/* The event list is not altered if the queue is locked.  This will
			be done when the queue is unlocked later. */
			if( pxQueue->xTxLock == queueUNLOCKED )
			{
				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock.  A context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority so
								record that a context switch is required. */
								if( pxHigherPriorityTaskWoken != NULL )
								{
									*pxHigherPriorityTaskWoken = pdTRUE;
								}
								else
								{
									mtCOVERAGE_TEST_MARKER();
								}
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was posted while it was locked. */
				++( pxQueue->xTxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
			xReturn = errQUEUE_FULL;
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/
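
/* Illustrative usage sketch (not part of the kernel source): an interrupt
service routine posts through the xQueueSendFromISR() family of macros and
then requests a context switch if a higher priority task was woken.  The name
of the yield macro is port specific; portYIELD_FROM_ISR() is assumed here:

	void vExampleISR( void )
	{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
	uint32_t ulReceivedValue = 0;

		xQueueSendFromISR( xExampleQueue, &ulReceivedValue, &xHigherPriorityTaskWoken );
		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}
*/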
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	/* Similar to xQueueGenericSendFromISR() but used with semaphores where the
	item size is 0.  Don't directly wake a task that was blocked on a queue
	read, instead return a flag to say whether a context switch is required or
	not (i.e. has a task with a higher priority than us been woken by this
	post). */

	configASSERT( pxQueue );

	/* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
	if the item size is not 0. */
	configASSERT( pxQueue->uxItemSize == 0 );

	/* Normally a mutex would not be given from an interrupt, especially if
	there is a mutex holder, as priority inheritance makes no sense for
	interrupts, only tasks. */
	configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* When the queue is used to implement a semaphore no data is ever
		moved through the queue but it is still valid to see if the queue 'has
		space'. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			traceQUEUE_SEND_FROM_ISR( pxQueue );

			/* A task can only have an inherited priority if it is a mutex
			holder - and if there is a mutex holder then the mutex cannot be
			given from an ISR.  As this is the ISR version of the function it
			can be assumed there is no mutex holder and no need to determine if
			priority disinheritance is needed.  Simply increase the count of
			messages (semaphores) available. */
			++( pxQueue->uxMessagesWaiting );

			/* The event list is not altered if the queue is locked.  This will
			be done when the queue is unlocked later. */
			if( pxQueue->xTxLock == queueUNLOCKED )
			{
				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
						{
							/* The semaphore is a member of a queue set, and
							posting to the queue set caused a higher priority
							task to unblock.  A context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority so
								record that a context switch is required. */
								if( pxHigherPriorityTaskWoken != NULL )
								{
									*pxHigherPriorityTaskWoken = pdTRUE;
								}
								else
								{
									mtCOVERAGE_TEST_MARKER();
								}
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was posted while it was locked. */
				++( pxQueue->xTxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
			xReturn = errQUEUE_FULL;
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/
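
/* Illustrative usage sketch (not part of the kernel source): this function is
normally reached through the xSemaphoreGiveFromISR() macro in semphr.h, for
example to signal a task from an interrupt:

	BaseType_t xHigherPriorityTaskWoken = pdFALSE;

	xSemaphoreGiveFromISR( xExampleCountingSemaphore, &xHigherPriorityTaskWoken );
	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );

The yield macro name is port specific, as noted for xQueueGenericSendFromISR(). */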
BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there data in the queue now?  To be running the calling task
			must be the highest priority task wanting to access the queue. */
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Remember the read position in case the queue is only being
				peeked. */
				pcOriginalReadPosition = pxQueue->u.pcReadFrom;

				prvCopyDataFromQueue( pxQueue, pvBuffer );

				if( xJustPeeking == pdFALSE )
				{
					traceQUEUE_RECEIVE( pxQueue );

					/* Actually removing data, not just peeking. */
					--( pxQueue->uxMessagesWaiting );

					#if ( configUSE_MUTEXES == 1 )
					{
						if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
						{
							/* Record the information required to implement
							priority inheritance should it become necessary. */
							pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					#endif /* configUSE_MUTEXES */

					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
						{
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					traceQUEUE_PEEK( pxQueue );

					/* The data is not being removed, so reset the read
					pointer. */
					pxQueue->u.pcReadFrom = pcOriginalReadPosition;

					/* The data is being left in the queue, so see if there are
					any other tasks waiting for the data. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority than this task. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was empty and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was empty and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

				#if ( configUSE_MUTEXES == 1 )
				{
					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
					{
						taskENTER_CRITICAL();
						{
							vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
						}
						taskEXIT_CRITICAL();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif

				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
				prvUnlockQueue( pxQueue );
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceQUEUE_RECEIVE_FAILED( pxQueue );
				return errQUEUE_EMPTY;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}
}
/*-----------------------------------------------------------*/
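
/* Illustrative usage sketch (not part of the kernel source): tasks normally
read through the xQueueReceive() and xQueuePeek() macros in queue.h, which
resolve to xQueueGenericReceive() with xJustPeeking set to pdFALSE and pdTRUE
respectively:

	uint32_t ulReceivedValue;

	if( xQueueReceive( xExampleQueue, &ulReceivedValue, portMAX_DELAY ) == pdPASS )
	{
		... ulReceivedValue now holds a copy of the item, which has been
		removed from the queue ...
	}
*/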
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

			prvCopyDataFromQueue( pxQueue, pvBuffer );
			--( pxQueue->uxMessagesWaiting );

			/* If the queue is locked the event list will not be modified.
			Instead update the lock count so the task that unlocks the queue
			will know that an ISR has removed data while the queue was
			locked. */
			if( pxQueue->xRxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority than us so
						force a context switch. */
						if( pxHigherPriorityTaskWoken != NULL )
						{
							*pxHigherPriorityTaskWoken = pdTRUE;
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was removed while it was locked. */
				++( pxQueue->xRxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/
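
/* Illustrative usage sketch (not part of the kernel source): draining a queue
from an interrupt, requesting a context switch afterwards if required (the
yield macro name is port specific; portYIELD_FROM_ISR() is assumed here):

	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
	uint32_t ulValue;

	while( xQueueReceiveFromISR( xExampleQueue, &ulValue, &xHigherPriorityTaskWoken ) == pdPASS )
	{
		... process ulValue ...
	}
	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
*/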
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_PEEK_FROM_ISR( pxQueue );

			/* Remember the read position so it can be reset as nothing is
			actually being removed from the queue. */
			pcOriginalReadPosition = pxQueue->u.pcReadFrom;
			prvCopyDataFromQueue( pxQueue, pvBuffer );
			pxQueue->u.pcReadFrom = pcOriginalReadPosition;

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/


UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

	configASSERT( xQueue );

	taskENTER_CRITICAL();
	{
		uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
	}
	taskEXIT_CRITICAL();

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t *pxQueue;

	pxQueue = ( Queue_t * ) xQueue;
	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
	}
	taskEXIT_CRITICAL();

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/
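
/* Illustrative usage sketch only - excluded from the build.  It demonstrates
uxQueueMessagesWaiting() and uxQueueSpacesAvailable() as instantaneous
snapshots: the values can change as soon as the critical section above is
exited, so they suit statistics and logging rather than send/receive decisions.
The xLogQueue handle and function name are assumptions made for this sketch. */
#if 0
void vExampleQueueWatermark( QueueHandle_t xLogQueue )
{
UBaseType_t uxUsed, uxFree;

	uxUsed = uxQueueMessagesWaiting( xLogQueue );
	uxFree = uxQueueSpacesAvailable( xLogQueue );

	/* uxUsed + uxFree always equals the length the queue was created with,
	but both values may already be stale by the time they are inspected. */
	( void ) uxUsed;
	( void ) uxFree;
}
#endif /* 0 - usage sketch */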

UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

	configASSERT( xQueue );

	uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	traceQUEUE_DELETE( pxQueue );
	#if ( configQUEUE_REGISTRY_SIZE > 0 )
	{
		vQueueUnregisterQueue( pxQueue );
	}
	#endif

	#if( configSUPPORT_STATIC_ALLOCATION == 0 )
	{
		/* The queue and the queue storage area will have been dynamically
		allocated in one go. */
		vPortFree( pxQueue );
	}
	#else
	{
		if( ( pxQueue->ucStaticAllocationFlags & queueSTATICALLY_ALLOCATED_STORAGE ) == 0 )
		{
			/* The queue storage area was dynamically allocated, so must be
			freed. */
			vPortFree( pxQueue->pcHead );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		if( ( pxQueue->ucStaticAllocationFlags & queueSTATICALLY_ALLOCATED_QUEUE_STRUCT ) == 0 )
		{
			/* The queue structure was dynamically allocated, so must be
			freed. */
			vPortFree( pxQueue );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	#endif
}
/*-----------------------------------------------------------*/
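
/* Illustrative usage sketch only - excluded from the build.  It shows the
expected lifecycle around vQueueDelete(): the queue must have been created
successfully, no task may still be blocked on it, and the handle must not be
used again after deletion.  The xQueueCreate() parameters and handle name are
assumptions made for this sketch. */
#if 0
void vExampleQueueLifecycle( void )
{
QueueHandle_t xTempQueue;

	/* Create a queue able to hold 8 items of sizeof( uint32_t ) bytes each. */
	xTempQueue = xQueueCreate( 8, sizeof( uint32_t ) );

	if( xTempQueue != NULL )
	{
		/* ... use the queue ... */

		/* Deleting frees only the memory the kernel allocated for the queue;
		ensure no task is blocked on xTempQueue before this point. */
		vQueueDelete( xTempQueue );
		xTempQueue = NULL;
	}
}
#endif /* 0 - usage sketch */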

#if ( configUSE_TRACE_FACILITY == 1 )

	UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
	{
		( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->ucQueueType;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;

	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
				pxQueue->pxMutexHolder = NULL;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize;
		if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		if( xPosition == queueOVERWRITE )
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--( pxQueue->uxMessagesWaiting );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

	++( pxQueue->uxMessagesWaiting );

	return xReturn;
}
/*-----------------------------------------------------------*/
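
/* Illustrative usage sketch only - excluded from the build.  The queueOVERWRITE
path above is what gives xQueueOverwrite() its mailbox behaviour: on a queue of
length one the previous value is replaced rather than a second copy being
queued.  The xMailbox handle and data values are assumptions made for this
sketch. */
#if 0
void vExampleMailbox( QueueHandle_t xMailbox )
{
uint32_t ulLatestReading = 0x12345678UL;
uint32_t ulSnapshot;

	/* Always succeeds on a queue created with a length of one - any previous
	value is simply overwritten. */
	( void ) xQueueOverwrite( xMailbox, &ulLatestReading );

	/* Read the mailbox without consuming it so other readers still see it. */
	if( xQueuePeek( xMailbox, &ulSnapshot, 0 ) == pdPASS )
	{
		/* ulSnapshot now holds the most recently written value. */
	}
}
#endif /* 0 - usage sketch */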

static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
	if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
	{
		pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
	}
}
/*-----------------------------------------------------------*/

static void prvUnlockQueue( Queue_t * const pxQueue )
{
	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

	/* The lock counts contain the number of extra data items placed or
	removed from the queue while the queue was locked.  When a queue is
	locked items can be added or removed, but the event lists cannot be
	updated. */
	taskENTER_CRITICAL();
	{
		/* See if data was added to the queue while it was locked. */
		while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
		{
			/* Data was posted while the queue was locked.  Are any tasks
			blocked waiting for data to become available? */
			#if ( configUSE_QUEUE_SETS == 1 )
			{
				if( pxQueue->pxQueueSetContainer != NULL )
				{
					if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
					{
						/* The queue is a member of a queue set, and posting to
						the queue set caused a higher priority task to unblock.
						A context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* Tasks that are removed from the event list will get added to
					the pending ready list as the scheduler is still suspended. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							vTaskMissedYield();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						break;
					}
				}
			}
			#else /* configUSE_QUEUE_SETS */
			{
				/* Tasks that are removed from the event list will get added to
				the pending ready list as the scheduler is still suspended. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority so record that
						a context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					break;
				}
			}
			#endif /* configUSE_QUEUE_SETS */

			--( pxQueue->xTxLock );
		}

		pxQueue->xTxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();

	/* Do the same for the Rx lock. */
	taskENTER_CRITICAL();
	{
		while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
		{
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					vTaskMissedYield();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				--( pxQueue->xRxLock );
			}
			else
			{
				break;
			}
		}

		pxQueue->xRxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a coroutine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
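
/* Illustrative usage sketch only - excluded from the build.  Application code
does not call xQueueCRSend()/xQueueCRReceive() directly; inside a co-routine it
uses the crQUEUE_SEND() and crQUEUE_RECEIVE() macros from croutine.h, which
handle the errQUEUE_BLOCKED return by yielding the co-routine.  The co-routine
name, the xCoRoutineQueue handle and the delay value are assumptions made for
this sketch; note that locals must be static because a co-routine's stack is
not preserved across blocking calls. */
#if 0
void vExampleProducerCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
/* Co-routines must use static (or otherwise persistent) data. */
static uint8_t ucValueToPost = 0;
static BaseType_t xResult;

	crSTART( xHandle );

	for( ;; )
	{
		/* Post to the queue, waiting up to 10 ticks for space to appear. */
		crQUEUE_SEND( xHandle, xCoRoutineQueue, &ucValueToPost, 10, &xResult );

		if( xResult == pdPASS )
		{
			ucValueToPost++;
		}
	}

	crEND();
}
#endif /* 0 - usage sketch */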

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
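
/* Illustrative usage sketch only - excluded from the build.  An interrupt
passes data to a co-routine through the crQUEUE_SEND_FROM_ISR() macro from
croutine.h, which wraps xQueueCRSendFromISR() above.  The return value is fed
back in on each call so that only one co-routine is woken per interrupt.  The
ISR name, the xCoRoutineQueue handle and the prvByteAvailable()/prvReadByte()
helpers are hypothetical stand-ins used only for this sketch. */
#if 0
void vExampleRxISRForCoRoutine( void )
{
char cRxedByte;
BaseType_t xCoRoutineWoken = pdFALSE;

	while( prvByteAvailable() )
	{
		cRxedByte = prvReadByte();

		/* Pass the byte to a co-routine; remember whether one was woken. */
		xCoRoutineWoken = crQUEUE_SEND_FROM_ISR( xCoRoutineQueue, &cRxedByte, xCoRoutineWoken );
	}
}
#endif /* 0 - usage sketch */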

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available.  If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
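
/* Illustrative usage sketch only - excluded from the build.  Registering a
queue simply associates a human readable name with its handle so that a kernel
aware debugger plug-in can display it; it has no effect on the queue's
behaviour.  configQUEUE_REGISTRY_SIZE must be large enough to hold every
registered handle.  The handle and function names are assumptions made for this
sketch. */
#if 0
void vExampleRegisterQueues( QueueHandle_t xTxQueue, QueueHandle_t xRxQueue )
{
	vQueueAddToRegistry( xTxQueue, "TxQueue" );
	vQueueAddToRegistry( xRxQueue, "RxQueue" );

	/* Remove a queue from the registry before (or when) it is deleted. */
	vQueueUnregisterQueue( xRxQueue );
}
#endif /* 0 - usage sketch */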

#if ( configQUEUE_REGISTRY_SIZE > 0 )

	const char *pcQueueGetQueueName( QueueHandle_t xQueue )
	{
	UBaseType_t ux;
	const char *pcReturn = NULL;

		/* Note there is nothing here to protect against another task adding or
		removing entries from the registry while it is being searched. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				pcReturn = xQueueRegistry[ ux ].pcQueueName;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

		return pcReturn;
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered is actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot is free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;

				/* Set the handle to NULL to ensure the same queue handle cannot
				appear in the registry twice if it is added, removed, then
				added again. */
				xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configUSE_TIMERS == 1 )

	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), NULL, NULL, queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xReturn = pdFAIL;
			}
			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xReturn = pdFAIL;
			}
			else
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
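
/* Illustrative usage sketch only - excluded from the build.  It shows the
normal queue set pattern: create the set with space for every event its members
can hold, add the members while they are empty, then block on
xQueueSelectFromSet() and read only the member handle that is returned.  The
lengths, handle names and task name are assumptions made for this sketch;
configUSE_QUEUE_SETS must be 1 and semphr.h must be included for the semaphore
calls. */
#if 0
void vExampleQueueSetTask( void *pvParameters )
{
QueueSetHandle_t xSet;
QueueHandle_t xDataQueue;
SemaphoreHandle_t xSemaphore;
QueueSetMemberHandle_t xActivated;
uint32_t ulReceived;

	( void ) pvParameters;

	/* The set must be able to hold one event per item of every member:
	here 10 possible queue items plus 1 semaphore 'item'. */
	xSet = xQueueCreateSet( 10 + 1 );
	xDataQueue = xQueueCreate( 10, sizeof( uint32_t ) );
	xSemaphore = xSemaphoreCreateBinary();

	/* Members must be empty when they are added to the set. */
	( void ) xQueueAddToSet( xDataQueue, xSet );
	( void ) xQueueAddToSet( xSemaphore, xSet );

	for( ;; )
	{
		/* Block until one of the members contains data. */
		xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );

		if( xActivated == ( QueueSetMemberHandle_t ) xDataQueue )
		{
			/* Guaranteed not to block as the handle was returned by the set. */
			( void ) xQueueReceive( xDataQueue, &ulReceived, 0 );
		}
		else if( xActivated == ( QueueSetMemberHandle_t ) xSemaphore )
		{
			( void ) xSemaphoreTake( xSemaphore, 0 );
		}
	}
}
#endif /* 0 - usage sketch */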

#if ( configUSE_QUEUE_SETS == 1 )

	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called from a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );

			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( pxQueueSetContainer->xTxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority. */
						xReturn = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				( pxQueueSetContainer->xTxLock )++;
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */