/*
	FreeRTOS V8.2.3 - Copyright (C) 2015 Real Time Engineers Ltd.
	All rights reserved

	VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

	This file is part of the FreeRTOS distribution.

	FreeRTOS is free software; you can redistribute it and/or modify it under
	the terms of the GNU General Public License (version 2) as published by the
	Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.

	***************************************************************************
	>>!   NOTE: The modification to the GPL is included to allow you to     !<<
	>>!   distribute a combined work that includes FreeRTOS without being   !<<
	>>!   obliged to provide the source code for proprietary components     !<<
	>>!   outside of the FreeRTOS kernel.                                   !<<
	***************************************************************************

	FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
	WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
	FOR A PARTICULAR PURPOSE.  Full license text is available on the following
	link: http://www.freertos.org/a00114.html

	***************************************************************************
	*                                                                         *
	*    FreeRTOS provides completely free yet professionally developed,     *
	*    robust, strictly quality controlled, supported, and cross           *
	*    platform software that is more than just the market leader, it      *
	*    is the industry's de facto standard.                                *
	*                                                                         *
	*    Help yourself get started quickly while simultaneously helping      *
	*    to support the FreeRTOS project by purchasing a FreeRTOS            *
	*    tutorial book, reference manual, or both:                           *
	*    http://www.FreeRTOS.org/Documentation                               *
	*                                                                         *
	***************************************************************************

	http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
	the FAQ page "My application does not run, what could be wrong?".  Have you
	defined configASSERT()?

	http://www.FreeRTOS.org/support - In return for receiving this top quality
	embedded software for free we request you assist our global community by
	participating in the support forum.

	http://www.FreeRTOS.org/training - Investing in training allows your team to
	be as productive as possible as early as possible.  Now you can receive
	FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
	Ltd, and the world's leading authority on the world's leading RTOS.

	http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
	including FreeRTOS+Trace - an indispensable productivity tool, a DOS
	compatible FAT file system, and our tiny thread aware UDP/IP stack.

	http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
	Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.

	http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
	Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
	licenses offer ticketed support, indemnification and commercial middleware.

	http://www.SafeRTOS.com - High Integrity Systems also provide a safety
	engineered and independently SIL3 certified version for use in safety and
	mission critical applications that require provable dependability.
*/
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
	#include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED					( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL
/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH	( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME			( ( TickType_t ) 0U )

/* Bits that can be set in xQUEUE->ucStaticAllocationFlags to indicate that the
queue storage area and queue structure were statically allocated respectively.
When these are statically allocated they won't be freed if the queue gets
deleted. */
#define queueSTATICALLY_ALLOCATED_STORAGE		( ( uint8_t ) 0x01 )
#define queueSTATICALLY_ALLOCATED_QUEUE_STRUCT	( ( uint8_t ) 0x02 )
#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
	int8_t *pcHead;					/*< Points to the beginning of the queue storage area. */
	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
	int8_t *pcWriteTo;				/*< Points to the next free place in the storage area. */

	union							/* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
	{
		int8_t *pcReadFrom;			/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
		UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
	} u;

	List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
	List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

	volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
	UBaseType_t uxItemSize;			/*< The size of each item that the queue will hold. */

	volatile BaseType_t xRxLock;	/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
	volatile BaseType_t xTxLock;	/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

	#if ( configUSE_QUEUE_SETS == 1 )
		struct QueueDefinition *pxQueueSetContainer;
	#endif

	#if ( configUSE_TRACE_FACILITY == 1 )
		UBaseType_t uxQueueNumber;
		uint8_t ucQueueType;
	#endif

	#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
		uint8_t ucStaticAllocationFlags;
	#endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} xQueueRegistryItem;

	/* The old xQueueRegistryItem name is maintained above then typedefed to the
	new QueueRegistryItem_t name below to enable the use of older kernel aware
	debuggers. */
	typedef xQueueRegistryItem QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
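/*
 * Example usage (illustrative application code, not part of the kernel): a
 * queue can be added to the registry so kernel aware debuggers display it by
 * name.  The handle, name and function below are hypothetical.
 *
 *	QueueHandle_t xCommandQueue;
 *
 *	void vSetupQueues( void )
 *	{
 *		xCommandQueue = xQueueCreate( 5, sizeof( uint32_t ) );
 *
 *		#if ( configQUEUE_REGISTRY_SIZE > 0 )
 *		{
 *			// Only the pointer is stored, so the name string must be persistent.
 *			vQueueAddToRegistry( xCommandQueue, "CommandQueue" );
 *		}
 *		#endif
 *	}
 */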
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

/*
 * A queue requires two blocks of memory; a structure to hold the queue state
 * and a storage area to hold the items in the queue.  The memory is assigned
 * by prvAllocateQueueMemory().  If ppucQueueStorage is NULL then the queue
 * storage will be allocated dynamically, otherwise the buffer passed in
 * ppucQueueStorage will be used.  If pxStaticQueue is NULL then the queue
 * structure will be allocated dynamically, otherwise the buffer pointed to by
 * pxStaticQueue will be used.
 */
static Queue_t *prvAllocateQueueMemory( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t **ppucQueueStorage, StaticQueue_t *pxStaticQueue );

#if ( configUSE_QUEUE_SETS == 1 )
	/*
	 * Checks to see if a queue is a member of a queue set, and if so, notifies
	 * the queue set that the queue contains data.
	 */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL();									\
	{														\
		if( ( pxQueue )->xRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->xTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
		pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
		pxQueue->pcWriteTo = pxQueue->pcHead;
		pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
		pxQueue->xRxLock = queueUNLOCKED;
		pxQueue->xTxLock = queueUNLOCKED;

		if( xNewQueue == pdFALSE )
		{
			/* If there are tasks blocked waiting to read from the queue, then
			the tasks will remain blocked as after this function exits the queue
			will still be empty.  If there are tasks blocked waiting to write to
			the queue, then one should be unblocked as after this function exits
			it will be possible to write to it. */
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					queueYIELD_IF_USING_PREEMPTION();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			/* Ensure the event queues start in the correct state. */
			vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
		}
	}
	taskEXIT_CRITICAL();

	/* A value is returned for calling semantic consistency with previous
	versions. */
	return pdPASS;
}
/*-----------------------------------------------------------*/
static Queue_t *prvAllocateQueueMemory( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t **ppucQueueStorage, StaticQueue_t *pxStaticQueue )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;

	configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

	#if( ( configASSERT_DEFINED == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
	{
		/* Sanity check that the size of the structure used to declare a
		variable of type StaticQueue_t or StaticSemaphore_t equals the size of
		the real queue and semaphore structures. */
		volatile size_t xSize = sizeof( StaticQueue_t );
		configASSERT( xSize == sizeof( Queue_t ) );
	}
	#endif /* configASSERT_DEFINED */

	if( uxItemSize == ( UBaseType_t ) 0 )
	{
		/* There is not going to be a queue storage area. */
		xQueueSizeInBytes = ( size_t ) 0;
	}
	else
	{
		/* Allocate enough space to hold the maximum number of items that can be
		in the queue at any time. */
		xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
	}

	#if( configSUPPORT_STATIC_ALLOCATION == 0 )
	{
		/* Allocate the new queue structure and storage area. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

		if( pxNewQueue != NULL )
		{
			/* Jump past the queue structure to find the location of the queue
			storage area. */
			*ppucQueueStorage = ( ( uint8_t * ) pxNewQueue ) + sizeof( Queue_t );
		}

		/* The pxStaticQueue parameter is not used.  Remove compiler warnings. */
		( void ) pxStaticQueue;
	}
	#else
	{
		if( pxStaticQueue == NULL )
		{
			/* A statically allocated queue was not passed in, so create one
			dynamically. */
			pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
			pxNewQueue->ucStaticAllocationFlags = 0;
		}
		else
		{
			/* The address of a statically allocated queue was passed in, use
			it and note that the queue was not dynamically allocated so there is
			no attempt to free it again should the queue be deleted. */
			pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
			pxNewQueue->ucStaticAllocationFlags = queueSTATICALLY_ALLOCATED_QUEUE_STRUCT;
		}

		if( pxNewQueue != NULL )
		{
			if( ( *ppucQueueStorage == NULL ) && ( xQueueSizeInBytes > ( size_t ) 0 ) )
			{
				/* A statically allocated queue storage area was not passed in,
				so allocate the queue storage area dynamically. */
				*ppucQueueStorage = ( uint8_t * ) pvPortMalloc( xQueueSizeInBytes );

				if( *ppucQueueStorage == NULL )
				{
					/* The queue storage area could not be created, so free the
					queue structure also. */
					if( ( pxNewQueue->ucStaticAllocationFlags & queueSTATICALLY_ALLOCATED_QUEUE_STRUCT ) == 0 )
					{
						vPortFree( ( void * ) pxNewQueue );
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					pxNewQueue = NULL;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Note the fact that either the queue storage area was passed
				into this function, or the size requirement for the queue
				storage area was zero - either way no attempt should be made to
				free the queue storage area if the queue is deleted. */
				pxNewQueue->ucStaticAllocationFlags |= queueSTATICALLY_ALLOCATED_STORAGE;
			}
		}
	}
	#endif /* configSUPPORT_STATIC_ALLOCATION */

	return pxNewQueue;
}
/*-----------------------------------------------------------*/
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;

	/* Remove compiler warnings about unused parameters should
	configUSE_TRACE_FACILITY not be set to 1. */
	( void ) ucQueueType;

	/* A queue requires a queue structure and a queue storage area.  These may
	be allocated statically or dynamically, depending on the parameter
	values. */
	pxNewQueue = prvAllocateQueueMemory( uxQueueLength, uxItemSize, &pucQueueStorage, pxStaticQueue );

	if( pxNewQueue != NULL )
	{
		if( uxItemSize == ( UBaseType_t ) 0 )
		{
			/* No RAM was allocated for the queue storage area, but pcHead
			cannot be set to NULL because NULL is used as a key to say the queue
			is used as a mutex.  Therefore just set pcHead to point to the queue
			as a benign value that is known to be within the memory map. */
			pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
		}
		else
		{
			/* Set the head to the start of the queue storage area. */
			pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
		}

		/* Initialise the queue members as described where the queue type is
		defined. */
		pxNewQueue->uxLength = uxQueueLength;
		pxNewQueue->uxItemSize = uxItemSize;
		( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

		#if ( configUSE_TRACE_FACILITY == 1 )
		{
			pxNewQueue->ucQueueType = ucQueueType;
		}
		#endif /* configUSE_TRACE_FACILITY */

		#if( configUSE_QUEUE_SETS == 1 )
		{
			pxNewQueue->pxQueueSetContainer = NULL;
		}
		#endif /* configUSE_QUEUE_SETS */

		traceQUEUE_CREATE( pxNewQueue );
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}

	configASSERT( pxNewQueue );

	return ( QueueHandle_t ) pxNewQueue;
}
/*-----------------------------------------------------------*/
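/*
 * Example usage (illustrative application code, not part of the kernel):
 * creating a queue through the public xQueueCreate() wrapper declared in
 * queue.h, which resolves to xQueueGenericCreate().  The item type and handle
 * below are hypothetical.
 *
 *	typedef struct
 *	{
 *		uint32_t ulSensorId;
 *		uint32_t ulReading;
 *	} Sample_t;
 *
 *	// Create a queue capable of holding 10 Sample_t items (items are copied in).
 *	QueueHandle_t xSampleQueue = xQueueCreate( 10, sizeof( Sample_t ) );
 *
 *	if( xSampleQueue == NULL )
 *	{
 *		// There was not enough FreeRTOS heap for the queue structure plus storage.
 *	}
 */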
#if ( configUSE_MUTEXES == 1 )

	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue )
	{
	Queue_t *pxNewQueue;
	const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		/* Allocate the new queue structure. */
		pxNewQueue = ( Queue_t * ) xQueueGenericCreate( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );

		if( pxNewQueue != NULL )
		{
			/* xQueueGenericCreate() will set all the queue structure members
			correctly for a generic queue, but this function is creating a
			mutex.  Overwrite those members that need to be set differently -
			in particular the information required for priority inheritance. */
			pxNewQueue->pxMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* In case this is a recursive mutex. */
			pxNewQueue->u.uxRecursiveCallCount = 0;

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state. */
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			traceCREATE_MUTEX_FAILED();
		}

		return pxNewQueue;
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
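/*
 * Example usage (illustrative application code, not part of the kernel): a
 * mutex is normally created and used through the semaphore API in semphr.h,
 * which maps onto xQueueCreateMutex(), xQueueGenericReceive() and
 * xQueueGenericSend().  The handle and functions below are hypothetical.
 *
 *	SemaphoreHandle_t xResourceMutex;
 *
 *	void vSetup( void )
 *	{
 *		xResourceMutex = xSemaphoreCreateMutex();
 *	}
 *
 *	void vAccessSharedResource( void )
 *	{
 *		// Wait up to 10 ticks for the mutex; priority inheritance applies while held.
 *		if( xSemaphoreTake( xResourceMutex, ( TickType_t ) 10 ) == pdTRUE )
 *		{
 *			// ...access the shared resource here...
 *			xSemaphoreGive( xResourceMutex );
 *		}
 *	}
 */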
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
	{
	void *pxReturn;

		/* This function is called by xSemaphoreGetMutexHolder(), and should not
		be called directly.  Note:  This is a good way of determining if the
		calling task is the mutex holder, but not a good way of determining the
		identity of the mutex holder, as the holder may change between the
		following critical section exiting and the function returning. */
		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
			}
			else
			{
				pxReturn = NULL;
			}
		}
		taskEXIT_CRITICAL();

		return pxReturn;
	} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then pxMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		pxMutexHolder can never coincidentally equal the tasks handle, and as
		this is the only condition we are interested in it does not matter if
		pxMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the pxMutexHolder variable. */
		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.uxRecursiveCallCount )--;

			/* Has the recursive call count unwound to 0? */
			if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			/* The mutex cannot be given because the calling task is not the
			holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* Comments regarding mutual exclusion as per those within
		xQueueGiveMutexRecursive(). */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
		{
			( pxMutex->u.uxRecursiveCallCount )++;
			xReturn = pdPASS;
		}
		else
		{
			xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

			/* pdPASS will only be returned if the mutex was successfully
			obtained.  The calling task may have entered the Blocked state
			before reaching here. */
			if( xReturn != pdFAIL )
			{
				( pxMutex->u.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
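/*
 * Example usage (illustrative application code, not part of the kernel): a
 * recursive mutex may be taken repeatedly by its holder, and must be given
 * back the same number of times before other tasks can obtain it.  Names used
 * here are hypothetical.
 *
 *	SemaphoreHandle_t xRecursiveMutex;
 *
 *	void vInit( void )
 *	{
 *		xRecursiveMutex = xSemaphoreCreateRecursiveMutex();
 *	}
 *
 *	void vNestedAccess( void )
 *	{
 *		if( xSemaphoreTakeRecursive( xRecursiveMutex, ( TickType_t ) 10 ) == pdPASS )
 *		{
 *			// Taking again from the same task just increments the call count.
 *			( void ) xSemaphoreTakeRecursive( xRecursiveMutex, 0 );
 *			// ...work on the protected resource...
 *			( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
 *			( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
 *		}
 *	}
 */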
#if ( configUSE_COUNTING_SEMAPHORES == 1 )

	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		configASSERT( xHandle );
		return xHandle;
	}

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/
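/*
 * Example usage (illustrative application code, not part of the kernel): a
 * counting semaphore with a maximum count of 5 and an initial count of 0,
 * typically used to count events latched by an interrupt.  Names used here
 * are hypothetical.
 *
 *	SemaphoreHandle_t xEventCount;
 *
 *	void vInit( void )
 *	{
 *		xEventCount = xSemaphoreCreateCounting( 5, 0 );
 *	}
 *
 *	void vConsumerTask( void *pvParameters )
 *	{
 *		( void ) pvParameters;
 *		for( ;; )
 *		{
 *			// Block until at least one event has been counted.
 *			if( xSemaphoreTake( xEventCount, portMAX_DELAY ) == pdTRUE )
 *			{
 *				// ...process one event...
 *			}
 *		}
 *	}
 */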
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there room on the queue now?  The running task must be the
			highest priority task wanting to access the queue.  If the head item
			in the queue is to be overwritten then it does not matter if the
			queue is full. */
			if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
			{
				traceQUEUE_SEND( pxQueue );
				xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock.  A context switch is required. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* If there was a task waiting for data to arrive on the
						queue then unblock it now. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The unblocked task has a priority higher than
								our own so yield immediately.  Yes it is ok to
								do this from within the critical section - the
								kernel takes care of that. */
								queueYIELD_IF_USING_PREEMPTION();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else if( xYieldRequired != pdFALSE )
						{
							/* This path is a special case that will only get
							executed if the task was holding multiple mutexes
							and the mutexes were given back in an order that is
							different to that in which they were taken. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately.  Yes it is ok to do
							this from within the critical section - the kernel
							takes care of that. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else if( xYieldRequired != pdFALSE )
					{
						/* This path is a special case that will only get
						executed if the task was holding multiple mutexes and
						the mutexes were given back in an order that is
						different to that in which they were taken. */
						queueYIELD_IF_USING_PREEMPTION();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was full and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();

					/* Return to the original privilege level before exiting
					the function. */
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was full and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

				/* Unlocking the queue means queue events can affect the
				event list.  It is possible that interrupts occurring now
				remove this task from the event list again - but as the
				scheduler is suspended the task will go onto the pending
				ready list instead of the actual ready list. */
				prvUnlockQueue( pxQueue );

				/* Resuming the scheduler will move tasks from the pending
				ready list into the ready list - so it is feasible that this
				task is already in a ready list before it yields - in which
				case the yield will not cause a context switch unless there
				is also a higher priority task in the pending ready list. */
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The timeout has expired. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			traceQUEUE_SEND_FAILED( pxQueue );
			return errQUEUE_FULL;
		}
	}
}
/*-----------------------------------------------------------*/
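/*
 * Example usage (illustrative application code, not part of the kernel): a
 * task posts an item with xQueueSend(), which maps onto xQueueGenericSend().
 * The handle xSampleQueue is hypothetical and assumed to hold uint32_t items.
 *
 *	uint32_t ulValueToSend = 10;
 *
 *	// Block for up to 100 ticks if the queue is already full.
 *	if( xQueueSend( xSampleQueue, &ulValueToSend, ( TickType_t ) 100 ) != pdPASS )
 *	{
 *		// The item could not be posted before the block time expired.
 *	}
 */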
#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				/* Is there room on the queue now?  To be running we must be
				the highest priority task wanting to access the queue. */
				if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
				{
					traceQUEUE_SEND( pxQueue );
					prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately. */
							portYIELD_WITHIN_API();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						taskEXIT_CRITICAL();
						return errQUEUE_FULL;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueFull( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_SEND( pxQueue );
						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					taskEXIT_CRITICAL();
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
			}
			taskEXIT_CRITICAL();
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/
#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	int8_t *pcOriginalReadPosition;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
				{
					/* Remember our read position in case we are just peeking. */
					pcOriginalReadPosition = pxQueue->u.pcReadFrom;

					prvCopyDataFromQueue( pxQueue, pvBuffer );

					if( xJustPeeking == pdFALSE )
					{
						traceQUEUE_RECEIVE( pxQueue );

						/* Data is actually being removed (not just peeked). */
						--( pxQueue->uxMessagesWaiting );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Record the information required to implement
								priority inheritance should it become necessary. */
								pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
							{
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
					}
					else
					{
						traceQUEUE_PEEK( pxQueue );

						/* The data is not being removed, so reset our read
						pointer. */
						pxQueue->u.pcReadFrom = pcOriginalReadPosition;

						/* The data is being left in the queue, so see if there are
						any other tasks waiting for the data. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							/* Tasks that are removed from the event list will get added to
							the pending ready list as the scheduler is still suspended. */
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority than this task. */
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						taskEXIT_CRITICAL();
						traceQUEUE_RECEIVE_FAILED( pxQueue );
						return errQUEUE_EMPTY;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								taskENTER_CRITICAL();
								{
									vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
								}
								taskEXIT_CRITICAL();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
			}
			taskEXIT_CRITICAL();
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	/* Similar to xQueueGenericSend, except without blocking if there is no room
	in the queue.  Also don't directly wake a task that was blocked on a queue
	read, instead return a flag to say whether a context switch is required or
	not (i.e. has a task with a higher priority than us been woken by this
	post). */
	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
		{
			traceQUEUE_SEND_FROM_ISR( pxQueue );

			/* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
			semaphore or mutex.  That means prvCopyDataToQueue() cannot result
			in a task disinheriting a priority and prvCopyDataToQueue() can be
			called here even though the disinherit function does not check if
			the scheduler is suspended before accessing the ready lists. */
			( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

			/* The event list is not altered if the queue is locked.  This will
			be done when the queue is unlocked later. */
			if( pxQueue->xTxLock == queueUNLOCKED )
			{
				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock.  A context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority so
								record that a context switch is required. */
								if( pxHigherPriorityTaskWoken != NULL )
								{
									*pxHigherPriorityTaskWoken = pdTRUE;
								}
								else
								{
									mtCOVERAGE_TEST_MARKER();
								}
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was posted while it was locked. */
				++( pxQueue->xTxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
			xReturn = errQUEUE_FULL;
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/
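/*
 * Example usage (illustrative application code, not part of the kernel):
 * posting to a queue from an interrupt with xQueueSendFromISR(), then
 * requesting a context switch if a higher priority task was woken.  The
 * handler, queue handle and peripheral access below are hypothetical, and
 * portYIELD_FROM_ISR() is assumed to be provided by the port in use.
 *
 *	void vExampleUartISR( void )
 *	{
 *	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *	char cRxByte = 0; // would normally be read from the peripheral
 *
 *		( void ) xQueueSendFromISR( xRxQueue, &cRxByte, &xHigherPriorityTaskWoken );
 *
 *		// If sending unblocked a task of higher priority than the one
 *		// interrupted, request a switch to it on exit from the ISR.
 *		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *	}
 */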
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	/* Similar to xQueueGenericSendFromISR() but used with semaphores where the
	item size is 0.  Don't directly wake a task that was blocked on a queue
	read, instead return a flag to say whether a context switch is required or
	not (i.e. has a task with a higher priority than us been woken by this
	post). */

	configASSERT( pxQueue );

	/* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
	if the item size is not 0. */
	configASSERT( pxQueue->uxItemSize == 0 );

	/* Normally a mutex would not be given from an interrupt, especially if
	there is a mutex holder, as priority inheritance makes no sense for an
	interrupt, only tasks. */
	configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* When the queue is used to implement a semaphore no data is ever
		moved through the queue but it is still valid to see if the queue 'has
		space'. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			traceQUEUE_SEND_FROM_ISR( pxQueue );

			/* A task can only have an inherited priority if it is a mutex
			holder - and if there is a mutex holder then the mutex cannot be
			given from an ISR.  As this is the ISR version of the function it
			can be assumed there is no mutex holder and no need to determine if
			priority disinheritance is needed.  Simply increase the count of
			messages (semaphores) available. */
			++( pxQueue->uxMessagesWaiting );

			/* The event list is not altered if the queue is locked.  This will
			be done when the queue is unlocked later. */
			if( pxQueue->xTxLock == queueUNLOCKED )
			{
				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
						{
							/* The semaphore is a member of a queue set, and
							posting to the queue set caused a higher priority
							task to unblock.  A context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority so
								record that a context switch is required. */
								if( pxHigherPriorityTaskWoken != NULL )
								{
									*pxHigherPriorityTaskWoken = pdTRUE;
								}
								else
								{
									mtCOVERAGE_TEST_MARKER();
								}
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was posted while it was locked. */
				++( pxQueue->xTxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
			xReturn = errQUEUE_FULL;
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/
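/*
 * Example usage (illustrative application code, not part of the kernel): a
 * binary or counting semaphore given from an interrupt via
 * xSemaphoreGiveFromISR(), which maps onto xQueueGiveFromISR().  The handler
 * name and xEventCount handle are hypothetical, and portYIELD_FROM_ISR() is
 * assumed to be provided by the port in use.
 *
 *	void vExampleTimerISR( void )
 *	{
 *	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *		// Unblock the task that is counting these events.
 *		( void ) xSemaphoreGiveFromISR( xEventCount, &xHigherPriorityTaskWoken );
 *		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *	}
 */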
BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there data in the queue now?  To be running the calling task
			must be the highest priority task wanting to access the queue. */
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Remember the read position in case the queue is only being
				peeked. */
				pcOriginalReadPosition = pxQueue->u.pcReadFrom;

				prvCopyDataFromQueue( pxQueue, pvBuffer );

				if( xJustPeeking == pdFALSE )
				{
					traceQUEUE_RECEIVE( pxQueue );

					/* Actually removing data, not just peeking. */
					--( pxQueue->uxMessagesWaiting );

					#if ( configUSE_MUTEXES == 1 )
					{
						if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
						{
							/* Record the information required to implement
							priority inheritance should it become necessary. */
							pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					#endif /* configUSE_MUTEXES */

					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
						{
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					traceQUEUE_PEEK( pxQueue );

					/* The data is not being removed, so reset the read
					pointer. */
					pxQueue->u.pcReadFrom = pcOriginalReadPosition;

					/* The data is being left in the queue, so see if there are
					any other tasks waiting for the data. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority than this task. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was empty and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was empty and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

				#if ( configUSE_MUTEXES == 1 )
				{
					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
					{
						taskENTER_CRITICAL();
						{
							vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
						}
						taskEXIT_CRITICAL();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif

				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
				prvUnlockQueue( pxQueue );
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceQUEUE_RECEIVE_FAILED( pxQueue );
				return errQUEUE_EMPTY;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}
}
/*-----------------------------------------------------------*/
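/*
 * Example usage (illustrative application code, not part of the kernel):
 * receiving from a queue with a finite block time via xQueueReceive(), and
 * peeking without removing the item via xQueuePeek().  Both wrappers resolve
 * to xQueueGenericReceive().  The handle xSampleQueue is hypothetical.
 *
 *	uint32_t ulReceivedValue;
 *
 *	// Block for up to 100 ticks waiting for an item to arrive.
 *	if( xQueueReceive( xSampleQueue, &ulReceivedValue, ( TickType_t ) 100 ) == pdPASS )
 *	{
 *		// ulReceivedValue holds a copy of the item, which has been removed.
 *	}
 *
 *	// Peeking copies the item out but leaves it on the queue.
 *	if( xQueuePeek( xSampleQueue, &ulReceivedValue, 0 ) == pdPASS )
 *	{
 *		// The item remains available to other tasks.
 *	}
 */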
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

			prvCopyDataFromQueue( pxQueue, pvBuffer );
			--( pxQueue->uxMessagesWaiting );

			/* If the queue is locked the event list will not be modified.
			Instead update the lock count so the task that unlocks the queue
			will know that an ISR has removed data while the queue was
			locked. */
			if( pxQueue->xRxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority than us so
						force a context switch. */
						if( pxHigherPriorityTaskWoken != NULL )
						{
							*pxHigherPriorityTaskWoken = pdTRUE;
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was removed while it was locked. */
				++( pxQueue->xRxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/
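/*
 * Example usage (illustrative application code, not part of the kernel):
 * draining a queue from an interrupt with xQueueReceiveFromISR().  An ISR
 * cannot block, so it simply loops while items are available.  The handler
 * and queue handle are hypothetical, and portYIELD_FROM_ISR() is assumed to
 * be provided by the port in use.
 *
 *	void vExampleDmaCompleteISR( void )
 *	{
 *	BaseType_t xTaskWoken = pdFALSE;
 *	uint32_t ulCommand;
 *
 *		// Process every command that is currently queued.
 *		while( xQueueReceiveFromISR( xCommandQueue, &ulCommand, &xTaskWoken ) == pdPASS )
 *		{
 *			// ...act on ulCommand...
 *		}
 *
 *		portYIELD_FROM_ISR( xTaskWoken );
 *	}
 */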
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority. Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority. FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_PEEK_FROM_ISR( pxQueue );

            /* Remember the read position so it can be reset as nothing is
            actually being removed from the queue. */
            pcOriginalReadPosition = pxQueue->u.pcReadFrom;
            prvCopyDataFromQueue( pxQueue, pvBuffer );
            pxQueue->u.pcReadFrom = pcOriginalReadPosition;

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
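
/* Illustrative usage sketch only - not part of the kernel source. It shows
xQueuePeekFromISR() reading the next item without removing it, so the data
remains on the queue for a task level reader. All names are hypothetical.

    void vExampleStatusISR( void )
    {
    uint32_t ulNextValue;

        if( xQueuePeekFromISR( xHypotheticalQueue, &ulNextValue ) == pdPASS )
        {
            vHypotheticalUpdateStatus( ulNextValue );
        }
    }
*/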
\r
UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t *pxQueue;

    pxQueue = ( Queue_t * ) xQueue;
    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );
    uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    traceQUEUE_DELETE( pxQueue );
    #if ( configQUEUE_REGISTRY_SIZE > 0 )
    {
        vQueueUnregisterQueue( pxQueue );
    }
    #endif

    #if( configSUPPORT_STATIC_ALLOCATION == 0 )
    {
        /* The queue and the queue storage area will have been dynamically
        allocated in one go. */
        vPortFree( pxQueue );
    }
    #else
    {
        if( ( pxQueue->ucStaticAllocationFlags & queueSTATICALLY_ALLOCATED_STORAGE ) == 0 )
        {
            /* The queue storage area was dynamically allocated, so must be
            freed. */
            vPortFree( pxQueue->pcHead );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        if( ( pxQueue->ucStaticAllocationFlags & queueSTATICALLY_ALLOCATED_QUEUE_STRUCT ) == 0 )
        {
            /* The queue structure was dynamically allocated, so must be
            freed. */
            vPortFree( pxQueue );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif
}
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
    {
        ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->ucQueueType;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;

    if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
    {
        #if ( configUSE_MUTEXES == 1 )
        {
            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                /* The mutex is no longer being held. */
                xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
                pxQueue->pxMutexHolder = NULL;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configUSE_MUTEXES */
    }
    else if( xPosition == queueSEND_TO_BACK )
    {
        ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
        pxQueue->pcWriteTo += pxQueue->uxItemSize;
        if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
        {
            pxQueue->pcWriteTo = pxQueue->pcHead;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    else
    {
        ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
        pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
        if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
        {
            pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        if( xPosition == queueOVERWRITE )
        {
            if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* An item is not being added but overwritten, so subtract
                one from the recorded number of items in the queue so when
                one is added again below the number of recorded items remains
                correct. */
                --( pxQueue->uxMessagesWaiting );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

    ++( pxQueue->uxMessagesWaiting );

    return xReturn;
}
/*-----------------------------------------------------------*/
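
/* Illustrative usage sketch only - not part of the kernel source. The
queueOVERWRITE copy position handled above is what the xQueueOverwrite() API
relies on: used with a queue of length one it behaves like a mailbox that
always holds the most recent value. The handle and variable names are
hypothetical.

    QueueHandle_t xMailbox;
    uint32_t ulValue;

    xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );

    ulValue = 10;
    ( void ) xQueueOverwrite( xMailbox, &ulValue );

    ulValue = 20;
    ( void ) xQueueOverwrite( xMailbox, &ulValue );

    At this point the queue still contains exactly one item, and that item
    holds the value 20.
*/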
\r
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
    if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
    {
        pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
        if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
        {
            pxQueue->u.pcReadFrom = pxQueue->pcHead;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
        ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
    }
}
/*-----------------------------------------------------------*/

static void prvUnlockQueue( Queue_t * const pxQueue )
{
    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

    /* The lock counts contain the number of extra data items placed or
    removed from the queue while the queue was locked. When a queue is
    locked items can be added or removed, but the event lists cannot be
    updated. */
    taskENTER_CRITICAL();
    {
        /* See if data was added to the queue while it was locked. */
        while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
        {
            /* Data was posted while the queue was locked. Are any tasks
            blocked waiting for data to become available? */
            #if ( configUSE_QUEUE_SETS == 1 )
            {
                if( pxQueue->pxQueueSetContainer != NULL )
                {
                    if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
                    {
                        /* The queue is a member of a queue set, and posting to
                        the queue set caused a higher priority task to unblock.
                        A context switch is required. */
                        vTaskMissedYield();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    /* Tasks that are removed from the event list will get
                    added to the pending ready list as the scheduler is still
                    suspended. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            vTaskMissedYield();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        break;
                    }
                }
            }
            #else /* configUSE_QUEUE_SETS */
            {
                /* Tasks that are removed from the event list will get added to
                the pending ready list as the scheduler is still suspended. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority so record that
                        a context switch is required. */
                        vTaskMissedYield();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    break;
                }
            }
            #endif /* configUSE_QUEUE_SETS */

            --( pxQueue->xTxLock );
        }

        pxQueue->xTxLock = queueUNLOCKED;
    }
    taskEXIT_CRITICAL();

    /* Do the same for the Rx lock. */
    taskENTER_CRITICAL();
    {
        while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
        {
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                {
                    vTaskMissedYield();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                --( pxQueue->xRxLock );
            }
            else
            {
                break;
            }
        }

        pxQueue->xRxLock = queueUNLOCKED;
    }
    taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;

    taskENTER_CRITICAL();
    {
        if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
        {
            xReturn = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

    configASSERT( xQueue );
    if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
    {
        xReturn = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;

    taskENTER_CRITICAL();
    {
        if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
        {
            xReturn = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

    configASSERT( xQueue );
    if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
    {
        xReturn = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

    BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        /* If the queue is already full we may have to block. A critical section
        is required to prevent an interrupt removing something from the queue
        between the check to see if the queue is full and blocking on the queue. */
        portDISABLE_INTERRUPTS();
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                /* The queue is full - do we want to block or just leave without
                posting? */
                if( xTicksToWait > ( TickType_t ) 0 )
                {
                    /* As this is called from a coroutine we cannot block directly, but
                    return indicating that we need to block. */
                    vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
                    portENABLE_INTERRUPTS();
                    return errQUEUE_BLOCKED;
                }
                else
                {
                    portENABLE_INTERRUPTS();
                    return errQUEUE_FULL;
                }
            }
        }
        portENABLE_INTERRUPTS();

        portDISABLE_INTERRUPTS();
        {
            if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
            {
                /* There is room in the queue, copy the data into the queue. */
                prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
                xReturn = pdPASS;

                /* Were any co-routines waiting for data to become available? */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    /* In this instance the co-routine could be placed directly
                    into the ready list as we are within a critical section.
                    Instead the same pending ready list mechanism is used as if
                    the event were caused from within an interrupt. */
                    if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        /* The co-routine waiting has a higher priority so record
                        that a yield might be appropriate. */
                        xReturn = errQUEUE_YIELD;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                xReturn = errQUEUE_FULL;
            }
        }
        portENABLE_INTERRUPTS();

        return xReturn;
    }

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
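
/* Illustrative usage sketch only - not part of the kernel source.
xQueueCRSend() is not normally called directly; application co-routines use
the crQUEUE_SEND() macro, and co-routine local variables that must keep their
value across a blocking call have to be static. The co-routine and queue
names below are hypothetical.

    static void vHypotheticalTxCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
    {
    static BaseType_t xResult;
    static uint32_t ulValueToPost = 0;

        crSTART( xHandle );

        for( ;; )
        {
            crQUEUE_SEND( xHandle, xHypotheticalQueue, &ulValueToPost, ( TickType_t ) 10, &xResult );
            ulValueToPost++;
        }

        crEND();
    }
*/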
\r
#if ( configUSE_CO_ROUTINES == 1 )

    BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        /* If the queue is already empty we may have to block. A critical section
        is required to prevent an interrupt adding something to the queue
        between the check to see if the queue is empty and blocking on the queue. */
        portDISABLE_INTERRUPTS();
        {
            if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
            {
                /* There are no messages in the queue, do we want to block or just
                leave with nothing? */
                if( xTicksToWait > ( TickType_t ) 0 )
                {
                    /* As this is a co-routine we cannot block directly, but return
                    indicating that we need to block. */
                    vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
                    portENABLE_INTERRUPTS();
                    return errQUEUE_BLOCKED;
                }
                else
                {
                    portENABLE_INTERRUPTS();
                    return errQUEUE_FULL;
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        portENABLE_INTERRUPTS();

        portDISABLE_INTERRUPTS();
        {
            if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Data is available from the queue. */
                pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
                if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
                {
                    pxQueue->u.pcReadFrom = pxQueue->pcHead;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
                --( pxQueue->uxMessagesWaiting );
                ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

                xReturn = pdPASS;

                /* Were any co-routines waiting for space to become available? */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    /* In this instance the co-routine could be placed directly
                    into the ready list as we are within a critical section.
                    Instead the same pending ready list mechanism is used as if
                    the event were caused from within an interrupt. */
                    if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        xReturn = errQUEUE_YIELD;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
        portENABLE_INTERRUPTS();

        return xReturn;
    }

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

    BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
    {
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        /* Cannot block within an ISR so if there is no space on the queue then
        exit without doing anything. */
        if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
        {
            prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

            /* We only want to wake one co-routine per ISR, so check that a
            co-routine has not already been woken. */
            if( xCoRoutinePreviouslyWoken == pdFALSE )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        return pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xCoRoutinePreviouslyWoken;
    }

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

    BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
    {
    BaseType_t xReturn;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        /* We cannot block from an ISR, so check there is data available. If
        not then just leave without doing anything. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            /* Copy the data from the queue. */
            pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
            if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
            {
                pxQueue->u.pcReadFrom = pxQueue->pcHead;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
            --( pxQueue->uxMessagesWaiting );
            ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

            if( ( *pxCoRoutineWoken ) == pdFALSE )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        *pxCoRoutineWoken = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
        }

        return xReturn;
    }

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configQUEUE_REGISTRY_SIZE > 0 )

    void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
    {
    UBaseType_t ux;

        /* See if there is an empty space in the registry. A NULL name denotes
        a free slot. */
        for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
        {
            if( xQueueRegistry[ ux ].pcQueueName == NULL )
            {
                /* Store the information on this queue. */
                xQueueRegistry[ ux ].pcQueueName = pcQueueName;
                xQueueRegistry[ ux ].xHandle = xQueue;

                traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
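
/* Illustrative usage sketch only - not part of the kernel source. The
registry just stores the name pointer and handle so a kernel aware debugger
can label the queue; the string is not copied, so it must remain valid (for
example, a string literal) for as long as the entry is in use. The handle
name is hypothetical.

    xHypotheticalQueue = xQueueCreate( 5, sizeof( uint32_t ) );
    vQueueAddToRegistry( xHypotheticalQueue, "RxQueue" );
*/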
\r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    const char *pcQueueGetQueueName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
    {
    UBaseType_t ux;
    const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

        /* Note there is nothing here to protect against another task adding or
        removing entries from the registry while it is being searched. */
        for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
        {
            if( xQueueRegistry[ ux ].xHandle == xQueue )
            {
                pcReturn = xQueueRegistry[ ux ].pcQueueName;
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        return pcReturn;
    }

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configQUEUE_REGISTRY_SIZE > 0 )

    void vQueueUnregisterQueue( QueueHandle_t xQueue )
    {
    UBaseType_t ux;

        /* See if the handle of the queue being unregistered is actually in the
        registry. */
        for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
        {
            if( xQueueRegistry[ ux ].xHandle == xQueue )
            {
                /* Set the name to NULL to show that this slot is free again. */
                xQueueRegistry[ ux ].pcQueueName = NULL;

                /* Set the handle to NULL to ensure the same queue handle cannot
                appear in the registry twice if it is added, removed, then
                added again. */
                xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configUSE_TIMERS == 1 )

    void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
    {
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        /* This function should not be called by application code hence the
        'Restricted' in its name. It is not part of the public API. It is
        designed for use by kernel code, and has special calling requirements.
        It can result in vListInsert() being called on a list that can only
        possibly ever have one item in it, so the list will be fast, but even
        so it should be called with the scheduler locked and not from a critical
        section. */

        /* Only do anything if there are no messages in the queue. This function
        will not actually cause the task to block, just place it on a blocked
        list. It will not block until the scheduler is unlocked - at which
        time a yield will be performed. If an item is added to the queue while
        the queue is locked, and the calling task blocks on the queue, then the
        calling task will be immediately unblocked when the queue is unlocked. */
        prvLockQueue( pxQueue );
        if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
        {
            /* There is nothing in the queue, block for the specified period. */
            vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
        prvUnlockQueue( pxQueue );
    }

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

    QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
    {
    QueueSetHandle_t pxQueue;

        pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), NULL, NULL, queueQUEUE_TYPE_SET );

        return pxQueue;
    }

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

    BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
    {
    BaseType_t xReturn;

        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
            {
                /* Cannot add a queue/semaphore to more than one queue set. */
                xReturn = pdFAIL;
            }
            else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
            {
                /* Cannot add a queue/semaphore to a queue set if there are already
                items in the queue/semaphore. */
                xReturn = pdFAIL;
            }
            else
            {
                ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
                xReturn = pdPASS;
            }
        }
        taskEXIT_CRITICAL();

        return xReturn;
    }

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

    BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
    {
    BaseType_t xReturn;
    Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

        if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
        {
            /* The queue was not a member of the set. */
            xReturn = pdFAIL;
        }
        else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
        {
            /* It is dangerous to remove a queue from a set when the queue is
            not empty because the queue set will still hold pending events for
            the queue. */
            xReturn = pdFAIL;
        }
        else
        {
            taskENTER_CRITICAL();
            {
                /* The queue is no longer contained in the set. */
                pxQueueOrSemaphore->pxQueueSetContainer = NULL;
            }
            taskEXIT_CRITICAL();
            xReturn = pdPASS;
        }

        return xReturn;
    } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

    QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
    {
    QueueSetMemberHandle_t xReturn = NULL;

        ( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
        return xReturn;
    }

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

    QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
    {
    QueueSetMemberHandle_t xReturn = NULL;

        ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
        return xReturn;
    }

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
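
/* Illustrative usage sketch only - not part of the kernel source. Typical
queue set usage: create a set large enough to hold an event for every space in
every member, add the (empty) members, then block on xQueueSelectFromSet() and
read from whichever member handle is returned. All names and lengths below
are hypothetical, and configUSE_QUEUE_SETS must be set to 1.

    xHypotheticalSet = xQueueCreateSet( hypoQUEUE_LENGTH + hypoSEMAPHORE_COUNT );
    ( void ) xQueueAddToSet( xHypotheticalQueue, xHypotheticalSet );
    ( void ) xQueueAddToSet( xHypotheticalSemaphore, xHypotheticalSet );

    for( ;; )
    {
        QueueSetMemberHandle_t xActivatedMember;

        xActivatedMember = xQueueSelectFromSet( xHypotheticalSet, portMAX_DELAY );

        if( xActivatedMember == ( QueueSetMemberHandle_t ) xHypotheticalQueue )
        {
            ( void ) xQueueReceive( xHypotheticalQueue, &ulReceivedValue, 0 );
        }
        else if( xActivatedMember == ( QueueSetMemberHandle_t ) xHypotheticalSemaphore )
        {
            ( void ) xSemaphoreTake( xHypotheticalSemaphore, 0 );
        }
    }
*/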
\r
#if ( configUSE_QUEUE_SETS == 1 )

    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
    {
    Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
    BaseType_t xReturn = pdFALSE;

        /* This function must be called from a critical section. */

        configASSERT( pxQueueSetContainer );
        configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

        if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
        {
            traceQUEUE_SEND( pxQueueSetContainer );

            /* The data copied is the handle of the queue that contains data. */
            xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

            if( pxQueueSetContainer->xTxLock == queueUNLOCKED )
            {
                if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority. */
                        xReturn = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                ( pxQueueSetContainer->xTxLock )++;
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xReturn;
    }

#endif /* configUSE_QUEUE_SETS */