/*
 * FreeRTOS Kernel V10.0.1
 * Copyright (C) 2017 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * http://www.FreeRTOS.org
 * http://aws.amazon.com/freertos
 *
 * 1 tab == 4 spaces!
 */
\r
/* Standard includes. */
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
\r
/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED                    ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED           ( ( int8_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder                    pcTail
#define uxQueueType                      pcHead
#define queueQUEUE_IS_MUTEX              NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME       ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
\r
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
    int8_t *pcHead;                 /*< Points to the beginning of the queue storage area. */
    int8_t *pcTail;                 /*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items, and this is used as a marker. */
    int8_t *pcWriteTo;              /*< Points to the next free place in the storage area. */

    union                           /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
    {
        int8_t *pcReadFrom;         /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
        UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
    } u;

    List_t xTasksWaitingToSend;     /*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;  /*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
    UBaseType_t uxLength;           /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;         /*< The size of each item that the queue will hold. */

    volatile int8_t cRxLock;        /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile int8_t cTxLock;        /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
        uint8_t ucStaticallyAllocated;  /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition *pxQueueSetContainer;
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;
\r
/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to
    the new QueueRegistryItem_t name below to enable the use of older kernel
    aware debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
\r
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
    /*
     * Checks to see if a queue is a member of a queue set, and if so, notifies
     * the queue set that the queue contains data.
     */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*
 * Called after a Queue_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;

/*
 * Mutexes are a special type of queue.  When a mutex is created, first the
 * queue is created, then prvInitialiseMutex() is called to configure the queue
 * as a mutex.
 */
#if( configUSE_MUTEXES == 1 )
    static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
#endif

#if( configUSE_MUTEXES == 1 )
    /*
     * If a task waiting for a mutex causes the mutex holder to inherit a
     * priority, but the waiting task times out, then the holder should
     * disinherit the priority - but only down to the highest priority of any
     * other tasks that are waiting for the same mutex.  This function returns
     * that priority.
     */
    static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/
\r
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                             \
    taskENTER_CRITICAL();                                   \
    {                                                       \
        if( ( pxQueue )->cRxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
        if( ( pxQueue )->cTxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
    }                                                       \
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
\r
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
        pxQueue->cRxLock = queueUNLOCKED;
        pxQueue->cTxLock = queueUNLOCKED;

        if( xNewQueue == pdFALSE )
        {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                {
                    queueYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Ensure the event queues start in the correct state. */
            vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
/*-----------------------------------------------------------*/
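/* Example usage (an illustrative sketch only, not part of the kernel -
application code normally reaches this function through the xQueueReset()
macro defined in queue.h; the function and handle names below are
hypothetical):

    void vFlushCommandQueue( QueueHandle_t xCommandQueue )
    {
        // Discard all queued items.  xQueueReset() passes pdFALSE as
        // xNewQueue, so one task blocked waiting to send, if any, is
        // unblocked because space has become available.
        ( void ) xQueueReset( xCommandQueue );
    }
*/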
\r
#if( configSUPPORT_STATIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue = NULL;

        configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

        /* The StaticQueue_t structure and the queue storage area must be
        supplied. */
        configASSERT( pxStaticQueue != NULL );

        /* A queue storage area should be provided if the item size is not 0, and
        should not be provided if the item size is 0. */
        configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) );
        configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) );

        #if( configASSERT_DEFINED == 1 )
        {
            /* Sanity check that the size of the structure used to declare a
            variable of type StaticQueue_t or StaticSemaphore_t equals the size of
            the real queue and semaphore structures. */
            volatile size_t xSize = sizeof( StaticQueue_t );
            configASSERT( xSize == sizeof( Queue_t ) );
        }
        #endif /* configASSERT_DEFINED */

        /* The address of a statically allocated queue was passed in, use it.
        The address of a statically allocated storage area was also passed in
        but is already set. */
        pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */

        if( pxNewQueue != NULL )
        {
            #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
            {
                /* Queues can be allocated either statically or dynamically, so
                note this queue was allocated statically in case the queue is
                later deleted. */
                pxNewQueue->ucStaticallyAllocated = pdTRUE;
            }
            #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            traceQUEUE_CREATE_FAILED( ucQueueType );
            mtCOVERAGE_TEST_MARKER();
        }

        return pxNewQueue;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
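/* Example usage (an illustrative sketch only, not part of the kernel -
application code normally reaches this function through the
xQueueCreateStatic() macro in queue.h; the lengths and names below are
hypothetical):

    #define QUEUE_LENGTH    10
    #define ITEM_SIZE       sizeof( uint32_t )

    static StaticQueue_t xQueueBuffer;
    static uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ];

    void vCreateQueueStatically( void )
    {
    QueueHandle_t xQueue;

        // No heap is used - both the queue structure and its storage
        // area are supplied by the application, as asserted above.
        xQueue = xQueueCreateStatic( QUEUE_LENGTH, ITEM_SIZE, ucQueueStorage, &xQueueBuffer );
        configASSERT( xQueue != NULL );
    }
*/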
\r
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;
    size_t xQueueSizeInBytes;
    uint8_t *pucQueueStorage;

        configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

        if( uxItemSize == ( UBaseType_t ) 0 )
        {
            /* There is not going to be a queue storage area. */
            xQueueSizeInBytes = ( size_t ) 0;
        }
        else
        {
            /* Allocate enough space to hold the maximum number of items that
            can be in the queue at any time. */
            xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
        }

        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

        if( pxNewQueue != NULL )
        {
            /* Jump past the queue structure to find the location of the queue
            storage area. */
            pucQueueStorage = ( ( uint8_t * ) pxNewQueue ) + sizeof( Queue_t );

            #if( configSUPPORT_STATIC_ALLOCATION == 1 )
            {
                /* Queues can be created either statically or dynamically, so
                note this queue was created dynamically in case it is later
                deleted. */
                pxNewQueue->ucStaticallyAllocated = pdFALSE;
            }
            #endif /* configSUPPORT_STATIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            traceQUEUE_CREATE_FAILED( ucQueueType );
            mtCOVERAGE_TEST_MARKER();
        }

        return pxNewQueue;
    }

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
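/* Example usage (an illustrative sketch only, not part of the kernel -
application code normally reaches this function through the xQueueCreate()
macro in queue.h; names below are hypothetical):

    void vCreateQueueDynamically( void )
    {
    QueueHandle_t xQueue;

        // Create a queue capable of holding 10 uint32_t values.  The
        // queue structure and its storage area come from the FreeRTOS
        // heap in a single pvPortMalloc() call, as seen above.
        xQueue = xQueueCreate( 10, sizeof( uint32_t ) );

        if( xQueue == NULL )
        {
            // The heap did not have enough free space for the queue.
        }
    }
*/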
\r
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
{
    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* No RAM was allocated for the queue storage area, but pcHead cannot
        be set to NULL because NULL is used as a key to say the queue is used as
        a mutex.  Therefore just set pcHead to point to the queue as a benign
        value that is known to be within the memory map. */
        pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
    }
    else
    {
        /* Set the head to the start of the queue storage area. */
        pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
    }

    /* Initialise the queue members as described where the queue type is
    defined. */
    pxNewQueue->uxLength = uxQueueLength;
    pxNewQueue->uxItemSize = uxItemSize;
    ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

    #if ( configUSE_TRACE_FACILITY == 1 )
    {
        pxNewQueue->ucQueueType = ucQueueType;
    }
    #endif /* configUSE_TRACE_FACILITY */

    #if( configUSE_QUEUE_SETS == 1 )
    {
        pxNewQueue->pxQueueSetContainer = NULL;
    }
    #endif /* configUSE_QUEUE_SETS */

    traceQUEUE_CREATE( pxNewQueue );
}
/*-----------------------------------------------------------*/
\r
#if( configUSE_MUTEXES == 1 )

    static void prvInitialiseMutex( Queue_t *pxNewQueue )
    {
        if( pxNewQueue != NULL )
        {
            /* The queue create function will set all the queue structure members
            correctly for a generic queue, but this function is creating a
            mutex.  Overwrite those members that need to be set differently -
            in particular the information required for priority inheritance. */
            pxNewQueue->pxMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* In case this is a recursive mutex. */
            pxNewQueue->u.uxRecursiveCallCount = 0;

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
\r
#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        pxNewQueue = ( Queue_t * ) xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
        prvInitialiseMutex( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
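/* Example usage (an illustrative sketch only, not part of the kernel -
application code normally reaches this function through the
xSemaphoreCreateMutex() macro in semphr.h; names below are hypothetical):

    SemaphoreHandle_t xResourceMutex;

    void vCreateAndUseMutex( void )
    {
        // The mutex is created 'full', as prvInitialiseMutex() gives it
        // once, so the first take succeeds immediately.
        xResourceMutex = xSemaphoreCreateMutex();

        if( xResourceMutex != NULL )
        {
            if( xSemaphoreTake( xResourceMutex, pdMS_TO_TICKS( 10 ) ) == pdPASS )
            {
                // ...access the protected resource...
                ( void ) xSemaphoreGive( xResourceMutex );
            }
        }
    }
*/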
\r
#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue )
    {
    Queue_t *pxNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        pxNewQueue = ( Queue_t * ) xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
        prvInitialiseMutex( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
\r
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
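/* Example usage (an illustrative sketch only, not part of the kernel -
application code normally reaches this function through the
xSemaphoreGetMutexHolder() macro in semphr.h; the sketch assumes
INCLUDE_xTaskGetCurrentTaskHandle is set to 1 and the function name is
hypothetical):

    void vAssertHeldByCallingTask( SemaphoreHandle_t xMutex )
    {
        // As noted above, comparing the holder against the calling
        // task's own handle is reliable; using the returned handle to
        // identify some other task is not, because the holder can
        // change at any time after the critical section exits.
        configASSERT( xSemaphoreGetMutexHolder( xMutex ) == xTaskGetCurrentTaskHandle() );
    }
*/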
\r
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        configASSERT( xSemaphore );

        /* Mutexes cannot be used in interrupt service routines, so the mutex
        holder should not change in an ISR, and therefore a critical section is
        not required here. */
        if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
        {
            pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
        }
        else
        {
            pxReturn = NULL;
        }

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
\r
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then pxMutexHolder will not
        change outside of this task.  If this task does not hold the mutex then
        pxMutexHolder can never coincidentally equal the task's handle, and as
        this is the only condition we are interested in it does not matter if
        pxMutexHolder is accessed simultaneously by another task.  Therefore no
        mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
            the task handle, therefore no underflow check is required.  Also,
            uxRecursiveCallCount is only modified by the mutex holder, and as
            there can only be one, no mutual exclusion is required to modify the
            uxRecursiveCallCount member. */
            ( pxMutex->u.uxRecursiveCallCount )--;

            /* Has the recursive call count unwound to 0? */
            if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
            holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
        xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
        {
            ( pxMutex->u.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );

            /* pdPASS will only be returned if the mutex was successfully
            obtained.  The calling task may have entered the Blocked state
            before reaching here. */
            if( xReturn != pdFAIL )
            {
                ( pxMutex->u.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
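/* Example usage (an illustrative sketch only, not part of the kernel -
application code normally reaches these functions through the
xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros in semphr.h;
names below are hypothetical):

    void vNestedAccess( SemaphoreHandle_t xRecursiveMutex )
    {
        if( xSemaphoreTakeRecursive( xRecursiveMutex, portMAX_DELAY ) == pdPASS )
        {
            // Taking again from the holding task just increments
            // uxRecursiveCallCount rather than blocking.
            ( void ) xSemaphoreTakeRecursive( xRecursiveMutex, 0 );

            // Each take must be balanced by a give; the mutex is only
            // returned when the count unwinds to zero.
            ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
            ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
        }
    }
*/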
\r
#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
\r
#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
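/* Example usage (an illustrative sketch only, not part of the kernel -
application code normally reaches this function through the
xSemaphoreCreateCounting() macro in semphr.h; the counts and names below are
hypothetical):

    void vCreateResourcePool( void )
    {
    SemaphoreHandle_t xPool;

        // A semaphore that can count up to 5, starting with all 5
        // 'resources' available - uxMessagesWaiting is set to the
        // initial count, exactly as done above.
        xPool = xSemaphoreCreateCounting( 5, 5 );
        configASSERT( xPool != NULL );
    }
*/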
\r
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
            highest priority task wanting to access the queue.  If the head item
            in the queue is to be overwritten then it does not matter if the
            queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                        {
                            /* Do not notify the queue set as an existing item
                            was overwritten in the queue so the number of items
                            in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    }
}
/*-----------------------------------------------------------*/
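/* Example usage (an illustrative sketch only, not part of the kernel -
application code normally reaches this function through the xQueueSend(),
xQueueSendToBack(), xQueueSendToFront() and xQueueOverwrite() macros in
queue.h; the task and names below are hypothetical):

    void vProducerTask( void *pvParameters )
    {
    QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;
    uint32_t ulValue = 0;

        for( ;; )
        {
            // Copy ulValue into the queue, blocking for at most 100ms
            // if the queue is full.  errQUEUE_FULL is returned on a
            // timeout, as implemented above.
            if( xQueueSend( xQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) != pdPASS )
            {
                // Timed out - the consumer is not keeping up.
            }
            ulValue++;
        }
    }
*/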
\r
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
    in the queue.  Also don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
            semaphore or mutex.  That means prvCopyDataToQueue() cannot result
            in a task disinheriting a priority and prvCopyDataToQueue() can be
            called here even though the disinherit function does not check if
            the scheduler is suspended before accessing the ready lists. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
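/* Example usage (an illustrative sketch only, not part of the kernel - ISR
code normally reaches this function through the xQueueSendFromISR() macro in
queue.h; the handler name, xRxQueue and ucReadUARTDataRegister() are
hypothetical):

    extern QueueHandle_t xRxQueue; // hypothetical queue of char

    void vUART_InterruptHandler( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    char cRxByte = ucReadUARTDataRegister(); // hypothetical hardware access

        // Post the byte.  This never blocks - errQUEUE_FULL is returned
        // if the queue is full.
        ( void ) xQueueSendFromISR( xRxQueue, &cRxByte, &xHigherPriorityTaskWoken );

        // Request a context switch on interrupt exit if a higher
        // priority task was unblocked by the post.
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/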
\r
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
    item size is 0.  Don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */

    configASSERT( pxQueue );

    /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
    if the item size is not 0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Normally a mutex would not be given from an interrupt, especially if
    there is a mutex holder, as priority inheritance makes no sense for an
    interrupt, only tasks. */
    configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* When the queue is used to implement a semaphore no data is ever
        moved through the queue but it is still valid to see if the queue 'has
        space'. */
        if( uxMessagesWaiting < pxQueue->uxLength )
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
            holder - and if there is a mutex holder then the mutex cannot be
            given from an ISR.  As this is the ISR version of the function it
            can be assumed there is no mutex holder and no need to determine if
            priority disinheritance is needed.  Simply increase the count of
            messages (semaphores) available. */
            pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
                        {
                            /* The semaphore is a member of a queue set, and
                            posting to the queue set caused a higher priority
                            task to unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
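/* Example usage (an illustrative sketch only, not part of the kernel - ISR
code normally reaches this function through the xSemaphoreGiveFromISR() macro
in semphr.h; the handler name and xTransferSemaphore are hypothetical):

    extern SemaphoreHandle_t xTransferSemaphore; // hypothetical

    void vTransferCompleteISR( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        // Signal the task waiting for the transfer.  The semaphore
        // count is simply incremented, as above - no data is copied.
        ( void ) xSemaphoreGiveFromISR( xTransferSemaphore, &xHigherPriorityTaskWoken );

        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/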
\r
BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    /* Check the pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* The buffer into which data is received can only be NULL if the data size
    is zero (so no data is copied into the buffer). */
    configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* Cannot block if the scheduler is suspended. */
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Data available, remove one item. */
                prvCopyDataFromQueue( pxQueue, pvBuffer );
                traceQUEUE_RECEIVE( pxQueue );
                pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;

                /* There is now space in the queue, were any tasks waiting to
                post to the queue?  If so, unblock the highest priority waiting
                task. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                    configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* The timeout has not expired.  If the queue is still empty place
            the task on the list of tasks waiting to receive from the queue. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The queue contains data again.  Loop back to try and read the
                data. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* Timed out.  If there is no data in the queue exit, otherwise loop
            back and attempt to read the data. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceQUEUE_RECEIVE_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
}
/*-----------------------------------------------------------*/
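/* Example usage (an illustrative sketch only, not part of the kernel; the
task and names below are hypothetical):

    void vConsumerTask( void *pvParameters )
    {
    QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;
    uint32_t ulReceived;

        for( ;; )
        {
            // Block indefinitely until an item arrives.  The item is
            // copied out of the queue into ulReceived and removed.
            if( xQueueReceive( xQueue, &ulReceived, portMAX_DELAY ) == pdPASS )
            {
                // ...process ulReceived...
            }
        }
    }
*/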
\r
BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

#if( configUSE_MUTEXES == 1 )
    BaseType_t xInheritanceOccurred = pdFALSE;
#endif

    /* Check the queue pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* Check this really is a semaphore, in which case the item size will be
    0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Cannot block if the scheduler is suspended. */
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Semaphores are queues with an item size of 0, and where the
            number of messages in the queue is the semaphore's count value. */
            const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if( uxSemaphoreCount > ( UBaseType_t ) 0 )
            {
                traceQUEUE_RECEIVE( pxQueue );

                /* Semaphores are queues with a data size of zero and where the
                messages waiting is the semaphore's count.  Reduce the count. */
                pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1;

                #if ( configUSE_MUTEXES == 1 )
                {
                    if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                    {
                        /* Record the information required to implement
                        priority inheritance should it become necessary. */
                        pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_MUTEXES */

                /* Check to see if other tasks are blocked waiting to give the
                semaphore, and if so, unblock the highest priority such task. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* For inheritance to have occurred there must have been an
                    initial timeout, and an adjusted timeout cannot become 0, as
                    if it were 0 the function would have exited. */
                    #if( configUSE_MUTEXES == 1 )
                    {
                        configASSERT( xInheritanceOccurred == pdFALSE );
                    }
                    #endif /* configUSE_MUTEXES */

                    /* The semaphore count was 0 and no block time is specified
                    (or the block time has expired) so exit now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The semaphore count was 0 and a block time was specified
                    so configure the timeout structure ready to block. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can give to and take from the semaphore
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* A block time is specified and not expired.  If the semaphore
            count is 0 then enter the Blocked state to wait for a semaphore to
            become available.  As semaphores are implemented with queues the
            queue being empty is equivalent to the semaphore count being 0. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                #if ( configUSE_MUTEXES == 1 )
                {
                    if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                    {
                        taskENTER_CRITICAL();
                        {
                            xInheritanceOccurred = xTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                        }
                        taskEXIT_CRITICAL();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif

                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* There was no timeout and the semaphore count was not 0, so
                attempt to take the semaphore again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* Timed out. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            /* If the semaphore count is 0 exit now as the timeout has
            expired.  Otherwise return to attempt to take the semaphore that is
            known to be available.  As semaphores are implemented by queues the
            queue being empty is equivalent to the semaphore count being 0. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                #if ( configUSE_MUTEXES == 1 )
                {
                    /* xInheritanceOccurred could only have been set if
                    pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
                    test the mutex type again to check it is actually a mutex. */
                    if( xInheritanceOccurred != pdFALSE )
                    {
                        taskENTER_CRITICAL();
                        {
                            UBaseType_t uxHighestWaitingPriority;

                            /* This task blocking on the mutex caused another
                            task to inherit this task's priority.  Now this task
                            has timed out the priority should be disinherited
                            again, but only as low as the next highest priority
                            task that is waiting for the same mutex. */
                            uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
                            vTaskPriorityDisinheritAfterTimeout( ( void * ) pxQueue->pxMutexHolder, uxHighestWaitingPriority );
                        }
                        taskEXIT_CRITICAL();
                    }
                }
                #endif /* configUSE_MUTEXES */

                traceQUEUE_RECEIVE_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
}
/*-----------------------------------------------------------*/
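/* Example usage (an illustrative sketch only, not part of the kernel -
application code normally reaches this function through the xSemaphoreTake()
macro in semphr.h; the task and xWorkSemaphore are hypothetical):

    extern SemaphoreHandle_t xWorkSemaphore; // hypothetical

    void vWorkerTask( void *pvParameters )
    {
        ( void ) pvParameters;

        for( ;; )
        {
            // Wait up to 500ms for the semaphore.  If the semaphore is
            // a mutex this may also raise the holder's priority through
            // the inheritance path implemented above.
            if( xSemaphoreTake( xWorkSemaphore, pdMS_TO_TICKS( 500 ) ) == pdPASS )
            {
                // ...the count was non-zero and has been decremented...
            }
            else
            {
                // Timed out waiting for the semaphore.
            }
        }
    }
*/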
\r
BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	/* Check the pointer is not NULL. */
	configASSERT( ( pxQueue ) );

	/* The buffer into which data is received can only be NULL if the data size
	is zero (so no data is copied into the buffer). */
	configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* Cannot block if the scheduler is suspended. */
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

			/* Is there data in the queue now?  To be running the calling task
			must be the highest priority task wanting to access the queue. */
			if( uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Remember the read position so it can be reset after the data
				is read from the queue as this function is only peeking the
				data, not removing it. */
				pcOriginalReadPosition = pxQueue->u.pcReadFrom;

				prvCopyDataFromQueue( pxQueue, pvBuffer );
				traceQUEUE_PEEK( pxQueue );

				/* The data is not being removed, so reset the read pointer. */
				pxQueue->u.pcReadFrom = pcOriginalReadPosition;

				/* The data is being left in the queue, so see if there are
				any other tasks waiting for the data. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority than this task. */
						queueYIELD_IF_USING_PREEMPTION();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was empty and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();
					traceQUEUE_PEEK_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was empty and a block time was specified so
					configure the timeout structure ready to enter the blocked
					state. */
					vTaskInternalSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			/* Timeout has not expired yet, check to see if there is data in the
			queue now, and if not enter the Blocked state to wait for data. */
			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
				prvUnlockQueue( pxQueue );
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* There is data in the queue now, so don't enter the blocked
				state, instead return to try and obtain the data. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The timeout has expired.  If there is still no data in the queue
			exit, otherwise go back and try to read the data again. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceQUEUE_PEEK_FAILED( pxQueue );
				return errQUEUE_EMPTY;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}
}
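/* Illustrative usage sketch (not part of the kernel source): a task that
inspects the item at the head of a queue without removing it might call
xQueuePeek() as below.  The task name, queue handle, item type and timeout
are hypothetical; the snippet is guarded out of compilation. */
#if 0
	void vExamplePeekingTask( void *pvParameters )
	{
	uint32_t ulValue;
	QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;

		for( ;; )
		{
			/* Wait up to 100 ticks for data, but leave it on the queue so
			another task can still receive it. */
			if( xQueuePeek( xQueue, &ulValue, ( TickType_t ) 100 ) == pdPASS )
			{
				/* ulValue now holds a copy of the item at the head of the
				queue; the item itself has not been removed. */
			}
		}
	}
#endif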
/*-----------------------------------------------------------*/

BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

		/* Cannot block in an ISR, so check there is data available. */
		if( uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			const int8_t cRxLock = pxQueue->cRxLock;

			traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

			prvCopyDataFromQueue( pxQueue, pvBuffer );
			pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;

			/* If the queue is locked the event list will not be modified.
			Instead update the lock count so the task that unlocks the queue
			will know that an ISR has removed data while the queue was
			locked. */
			if( cRxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority than us so
						force a context switch. */
						if( pxHigherPriorityTaskWoken != NULL )
						{
							*pxHigherPriorityTaskWoken = pdTRUE;
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was removed while it was locked. */
				pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
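/* Illustrative usage sketch (not part of the kernel source): draining a
queue from an interrupt handler, then requesting a context switch if a
higher priority task was unblocked.  The handler name and xByteQueue are
hypothetical, and portYIELD_FROM_ISR() is assumed to be provided by the
port in use; the snippet is guarded out of compilation. */
#if 0
	void vExampleInterruptHandler( void )
	{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
	char cByte;

		while( xQueueReceiveFromISR( xByteQueue, &cByte, &xHigherPriorityTaskWoken ) == pdPASS )
		{
			/* Process cByte here. */
		}

		/* If receiving unblocked a task of higher priority than the task
		that was interrupted, request a context switch on exit from the ISR. */
		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}
#endif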
/*-----------------------------------------------------------*/

BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_PEEK_FROM_ISR( pxQueue );

			/* Remember the read position so it can be reset as nothing is
			actually being removed from the queue. */
			pcOriginalReadPosition = pxQueue->u.pcReadFrom;
			prvCopyDataFromQueue( pxQueue, pvBuffer );
			pxQueue->u.pcReadFrom = pcOriginalReadPosition;

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

	configASSERT( xQueue );

	taskENTER_CRITICAL();
	{
		uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
	}
	taskEXIT_CRITICAL();

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t *pxQueue;

	pxQueue = ( Queue_t * ) xQueue;
	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
	}
	taskEXIT_CRITICAL();

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
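/* Illustrative usage sketch (not part of the kernel source): together
uxQueueMessagesWaiting() and uxQueueSpacesAvailable() always account for
the queue's full length.  The function and handle names are hypothetical;
the snippet is guarded out of compilation. */
#if 0
	void vExampleQueryQueue( QueueHandle_t xQueue )
	{
	UBaseType_t uxUsed, uxFree;

		uxUsed = uxQueueMessagesWaiting( xQueue );
		uxFree = uxQueueSpacesAvailable( xQueue );

		/* uxUsed + uxFree equals the uxQueueLength passed when the queue was
		created - assuming no other task or interrupt modified the queue
		between the two calls. */
	}
#endif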
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

	configASSERT( xQueue );

	uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	traceQUEUE_DELETE( pxQueue );

	#if ( configQUEUE_REGISTRY_SIZE > 0 )
	{
		vQueueUnregisterQueue( pxQueue );
	}
	#endif

	#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
	{
		/* The queue can only have been allocated dynamically - free it
		again. */
		vPortFree( pxQueue );
	}
	#elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
	{
		/* The queue could have been allocated statically or dynamically, so
		check before attempting to free the memory. */
		if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
		{
			vPortFree( pxQueue );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	#else
	{
		/* The queue must have been statically allocated, so is not going to be
		deleted.  Avoid compiler warnings about the unused parameter. */
		( void ) pxQueue;
	}
	#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
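/* Illustrative usage sketch (not part of the kernel source): a queue should
only be deleted once no task can be blocked on it.  The function name is
hypothetical; the snippet is guarded out of compilation. */
#if 0
	void vExampleTearDown( QueueHandle_t xQueue )
	{
		/* Application logic must guarantee that no task is blocked on xQueue
		at this point - vQueueDelete() does not check for waiting tasks.  For
		a dynamically allocated queue this returns the storage to the heap. */
		vQueueDelete( xQueue );
	}
#endif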
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
	{
		( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->ucQueueType;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
#if( configUSE_MUTEXES == 1 )

	static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
	{
	UBaseType_t uxHighestPriorityOfWaitingTasks;

		/* If a task waiting for a mutex causes the mutex holder to inherit a
		priority, but the waiting task times out, then the holder should
		disinherit the priority - but only down to the highest priority of any
		other tasks that are waiting for the same mutex.  For this purpose,
		return the priority of the highest priority task that is waiting for the
		mutex. */
		if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0 )
		{
			uxHighestPriorityOfWaitingTasks = configMAX_PRIORITIES - listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) );
		}
		else
		{
			uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
		}

		return uxHighestPriorityOfWaitingTasks;
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;
UBaseType_t uxMessagesWaiting;

	/* This function is called from a critical section. */

	uxMessagesWaiting = pxQueue->uxMessagesWaiting;

	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
				pxQueue->pxMutexHolder = NULL;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize;
		if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		if( xPosition == queueOVERWRITE )
		{
			if( uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--uxMessagesWaiting;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

	pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;

	return xReturn;
}
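/* Illustrative sketch (not part of the kernel source): the wrap-around logic
above implements a circular buffer.  The stand-alone fragment below mirrors
the queueSEND_TO_BACK path using hypothetical local names, and is guarded
out of compilation. */
#if 0
	/* pcHead points to the start of the storage area, pcTail to one byte
	past its end, and pcWriteTo to the next free slot. */
	memcpy( ( void * ) pcWriteTo, pvItem, ( size_t ) uxItemSize );
	pcWriteTo += uxItemSize;
	if( pcWriteTo >= pcTail )
	{
		/* Wrap back to the first slot once the end of the storage area has
		been passed. */
		pcWriteTo = pcHead;
	}
#endif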
/*-----------------------------------------------------------*/

static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
	if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
	{
		pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
	}
}
/*-----------------------------------------------------------*/

static void prvUnlockQueue( Queue_t * const pxQueue )
{
	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

	/* The lock counts contain the number of extra data items placed or
	removed from the queue while the queue was locked.  When a queue is
	locked items can be added or removed, but the event lists cannot be
	updated. */
	taskENTER_CRITICAL();
	{
		int8_t cTxLock = pxQueue->cTxLock;

		/* See if data was added to the queue while it was locked. */
		while( cTxLock > queueLOCKED_UNMODIFIED )
		{
			/* Data was posted while the queue was locked.  Are any tasks
			blocked waiting for data to become available? */
			#if ( configUSE_QUEUE_SETS == 1 )
			{
				if( pxQueue->pxQueueSetContainer != NULL )
				{
					if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
					{
						/* The queue is a member of a queue set, and posting to
						the queue set caused a higher priority task to unblock.
						A context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* Tasks that are removed from the event list will get
					added to the pending ready list as the scheduler is still
					suspended. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							vTaskMissedYield();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						break;
					}
				}
			}
			#else /* configUSE_QUEUE_SETS */
			{
				/* Tasks that are removed from the event list will get added to
				the pending ready list as the scheduler is still suspended. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority so record that
						a context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					break;
				}
			}
			#endif /* configUSE_QUEUE_SETS */

			--cTxLock;
		}

		pxQueue->cTxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();

	/* Do the same for the Rx lock. */
	taskENTER_CRITICAL();
	{
		int8_t cRxLock = pxQueue->cRxLock;

		while( cRxLock > queueLOCKED_UNMODIFIED )
		{
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					vTaskMissedYield();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				--cRxLock;
			}
			else
			{
				break;
			}
		}

		pxQueue->cRxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a co-routine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
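/* Illustrative usage sketch (not part of the kernel source): application
co-routines do not call xQueueCRSend() directly, but use the crQUEUE_SEND()
macro from queue.h, which yields when errQUEUE_BLOCKED is returned.  The
co-routine name and xCoRoutineQueue are hypothetical; note that variables
that must survive a blocking call are declared static, as required by the
co-routine implementation.  The snippet is guarded out of compilation. */
#if 0
	static void vExampleCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
	{
	static BaseType_t xResult;
	static uint8_t ucValueToPost = 0;

		crSTART( xHandle );

		for( ;; )
		{
			/* Post to the (hypothetical) xCoRoutineQueue, blocking for up to
			10 ticks if the queue is full. */
			crQUEUE_SEND( xHandle, xCoRoutineQueue, &ucValueToPost, ( TickType_t ) 10, &xResult );

			if( xResult == pdPASS )
			{
				ucValueToPost++;
			}
		}

		crEND();
	}
#endif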
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available.  If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
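/* Illustrative usage sketch (not part of the kernel source): queues are
typically registered straight after creation so kernel-aware debuggers can
display them by name.  xCommandQueue and its parameters are hypothetical;
the snippet is guarded out of compilation. */
#if 0
	QueueHandle_t xCommandQueue;

	xCommandQueue = xQueueCreate( 10, sizeof( uint32_t ) );
	if( xCommandQueue != NULL )
	{
		/* The name string is stored by pointer, not copied, so it must remain
		valid for as long as the queue is registered - a string literal is the
		common choice. */
		vQueueAddToRegistry( xCommandQueue, "CmdQ" );
	}
#endif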
/*-----------------------------------------------------------*/

#if ( configQUEUE_REGISTRY_SIZE > 0 )

	const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;
	const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

		/* Note there is nothing here to protect against another task adding or
		removing entries from the registry while it is being searched. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				pcReturn = xQueueRegistry[ ux ].pcQueueName;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

		return pcReturn;
	} /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered is actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot is free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;

				/* Set the handle to NULL to ensure the same queue handle cannot
				appear in the registry twice if it is added, removed, then
				added again. */
				xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configUSE_TIMERS == 1 )

	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/

#if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xReturn = pdFAIL;
			}
			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xReturn = pdFAIL;
			}
			else
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
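/* Illustrative usage sketch (not part of the kernel source): a task that
blocks on two queues at once via a queue set.  The task name, xQueue1,
xQueue2 and the queue lengths are hypothetical; the snippet is guarded out
of compilation. */
#if 0
	void vExampleQueueSetTask( void *pvParameters )
	{
	QueueSetHandle_t xSet;
	QueueSetMemberHandle_t xActivated;
	uint32_t ulValue;

		/* The set must be able to hold one event per space in each member
		queue - here assumed to be two queues of length 5 each. */
		xSet = xQueueCreateSet( 10 );
		xQueueAddToSet( xQueue1, xSet );	/* Both queues must be empty */
		xQueueAddToSet( xQueue2, xSet );	/* when they are added. */

		for( ;; )
		{
			/* Block until one of the member queues contains data, then read
			from whichever queue the returned handle identifies.  The receive
			cannot block as the handle is only returned when data is ready. */
			xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );

			if( xActivated == ( QueueSetMemberHandle_t ) xQueue1 )
			{
				( void ) xQueueReceive( xQueue1, &ulValue, 0 );
			}
			else if( xActivated == ( QueueSetMemberHandle_t ) xQueue2 )
			{
				( void ) xQueueReceive( xQueue2, &ulValue, 0 );
			}
		}
	}
#endif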
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called from a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			const int8_t cTxLock = pxQueueSetContainer->cTxLock;

			traceQUEUE_SEND( pxQueueSetContainer );

			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( cTxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority. */
						xReturn = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */