/*
 * FreeRTOS Kernel V10.0.1
 * Copyright (C) 2017 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * http://www.FreeRTOS.org
 * http://aws.amazon.com/freertos
 *
 * 1 tab == 4 spaces!
 */
/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED                       ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED              ( ( int8_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder                       pcTail
#define uxQueueType                         pcHead
#define queueQUEUE_IS_MUTEX                 NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH    ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME          ( ( TickType_t ) 0U )
#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
    int8_t *pcHead;                 /*< Points to the beginning of the queue storage area. */
    int8_t *pcTail;                 /*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this byte is used as a marker. */
    int8_t *pcWriteTo;              /*< Points to the free next place in the storage area. */

    union                           /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
    {
        int8_t *pcReadFrom;         /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
        UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
    } u;

    List_t xTasksWaitingToSend;     /*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;  /*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
    UBaseType_t uxLength;           /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;         /*< The size of each item that the queue will hold. */

    volatile int8_t cRxLock;        /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile int8_t cTxLock;        /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
        uint8_t ucStaticallyAllocated;  /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition *pxQueueSetContainer;
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;
/*-----------------------------------------------------------*/
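/* Illustrative note: the dual-use names defined above are how the rest of this
file tells a mutex apart from an ordinary queue.  For example, the pattern used
by xQueueGetMutexHolder() further down is (sketch only - the variable name
pxHolder is purely illustrative):

    if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
    {
        pxHolder = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
    }
*/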
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to the
    new xQueueRegistryItem name below to enable the use of older kernel aware
    debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
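/* Example usage sketch (assumes configQUEUE_REGISTRY_SIZE > 0 and the
vQueueAddToRegistry() API declared in queue.h; the queue name and handle below
are purely illustrative).  Registering a queue simply makes it visible, by
name, to a kernel aware debugger:

    void vCreateAndRegisterQueue( void )
    {
    QueueHandle_t xDebugQueue;

        xDebugQueue = xQueueCreate( 8, sizeof( uint32_t ) );

        if( xDebugQueue != NULL )
        {
            vQueueAddToRegistry( xDebugQueue, "DebugQueue" );
        }
    }
*/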
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE;
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
    /*
     * Checks to see if a queue is a member of a queue set, and if so, notifies
     * the queue set that the queue contains data.
     */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*
 * Called after a Queue_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;

/*
 * Mutexes are a special type of queue.  When a mutex is created, first the
 * queue is created, then prvInitialiseMutex() is called to configure the queue
 * to make it behave as a mutex.
 */
#if( configUSE_MUTEXES == 1 )
    static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
#endif

#if( configUSE_MUTEXES == 1 )
    /*
     * If a task waiting for a mutex causes the mutex holder to inherit a
     * priority, but the waiting task times out, then the holder should
     * disinherit the priority - but only down to the highest priority of any
     * other tasks that are waiting for the same mutex.  This function returns
     * that priority.
     */
    static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                             \
    taskENTER_CRITICAL();                                   \
    {                                                       \
        if( ( pxQueue )->cRxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
        if( ( pxQueue )->cTxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
    }                                                       \
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
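/* Illustrative sketch of how the lock is used by the blocking paths later in
this file (the pattern is taken from xQueueGenericSend() and xQueueReceive()
below; the queue variable is illustrative).  The scheduler is suspended, the
queue is locked, the task is placed on the queue's event list, and the queue
is unlocked again before the scheduler is resumed - so ISRs that post or read
in the meantime only increment cTxLock/cRxLock rather than touching the event
lists directly:

    vTaskSuspendAll();
    prvLockQueue( pxQueue );

    vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

    prvUnlockQueue( pxQueue );

    if( xTaskResumeAll() == pdFALSE )
    {
        portYIELD_WITHIN_API();
    }
*/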
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
        pxQueue->cRxLock = queueUNLOCKED;
        pxQueue->cTxLock = queueUNLOCKED;

        if( xNewQueue == pdFALSE )
        {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                {
                    queueYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Ensure the event queues start in the correct state. */
            vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
/*-----------------------------------------------------------*/
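/* Example usage sketch: xQueueGenericReset() is normally reached through the
xQueueReset() macro in queue.h.  The function and queue names below are
illustrative.  A reset empties the queue and, because the queue then has free
space, unblocks one task that was waiting to send:

    void vFlushTelemetryQueue( QueueHandle_t xTelemetryQueue )
    {
        ( void ) xQueueReset( xTelemetryQueue );
    }
*/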
#if( configSUPPORT_STATIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;

        configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

        /* The StaticQueue_t structure and the queue storage area must be
        provided. */
        configASSERT( pxStaticQueue != NULL );

        /* A queue storage area should be provided if the item size is not 0, and
        should not be provided if the item size is 0. */
        configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) );
        configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) );

        #if( configASSERT_DEFINED == 1 )
        {
            /* Sanity check that the size of the structure used to declare a
            variable of type StaticQueue_t or StaticSemaphore_t equals the size of
            the real queue and semaphore structures. */
            volatile size_t xSize = sizeof( StaticQueue_t );
            configASSERT( xSize == sizeof( Queue_t ) );
        }
        #endif /* configASSERT_DEFINED */

        /* The address of a statically allocated queue was passed in, use it.
        The address of a statically allocated storage area was also passed in
        but is already set. */
        pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */

        if( pxNewQueue != NULL )
        {
            #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
            {
                /* Queues can be allocated either statically or dynamically, so
                note this queue was allocated statically in case the queue is
                later deleted. */
                pxNewQueue->ucStaticallyAllocated = pdTRUE;
            }
            #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            traceQUEUE_CREATE_FAILED( ucQueueType );
        }

        return pxNewQueue;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
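/* Example usage sketch (assumes configSUPPORT_STATIC_ALLOCATION == 1 and the
xQueueCreateStatic() macro from queue.h; the buffer sizes and names are
illustrative).  Both the StaticQueue_t structure and the storage area are
supplied by the caller, so no heap is used:

    #define ITEM_COUNT      10
    #define ITEM_SIZE       sizeof( uint32_t )

    static StaticQueue_t xQueueBuffer;
    static uint8_t ucQueueStorage[ ITEM_COUNT * ITEM_SIZE ];

    void vCreateStaticQueue( void )
    {
    QueueHandle_t xQueue;

        xQueue = xQueueCreateStatic( ITEM_COUNT, ITEM_SIZE, ucQueueStorage, &xQueueBuffer );
        configASSERT( xQueue != NULL );
    }
*/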
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;
    size_t xQueueSizeInBytes;
    uint8_t *pucQueueStorage;

        configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

        if( uxItemSize == ( UBaseType_t ) 0 )
        {
            /* There is not going to be a queue storage area. */
            xQueueSizeInBytes = ( size_t ) 0;
        }
        else
        {
            /* Allocate enough space to hold the maximum number of items that
            can be in the queue at any time. */
            xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
        }

        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

        if( pxNewQueue != NULL )
        {
            /* Jump past the queue structure to find the location of the queue
            storage area. */
            pucQueueStorage = ( ( uint8_t * ) pxNewQueue ) + sizeof( Queue_t );

            #if( configSUPPORT_STATIC_ALLOCATION == 1 )
            {
                /* Queues can be created either statically or dynamically, so
                note this queue was created dynamically in case it is later
                deleted. */
                pxNewQueue->ucStaticallyAllocated = pdFALSE;
            }
            #endif /* configSUPPORT_STATIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            traceQUEUE_CREATE_FAILED( ucQueueType );
        }

        return pxNewQueue;
    }

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
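/* Example usage sketch (assumes configSUPPORT_DYNAMIC_ALLOCATION == 1; the
message type and queue length are illustrative).  xQueueCreate() is the
queue.h macro that resolves to xQueueGenericCreate():

    typedef struct
    {
        uint8_t ucMessageID;
        uint32_t ulPayload;
    } ExampleMessage_t;

    void vCreateExampleQueue( void )
    {
    QueueHandle_t xMessageQueue;

        xMessageQueue = xQueueCreate( 5, sizeof( ExampleMessage_t ) );

        if( xMessageQueue == NULL )
        {
            // The heap had insufficient space - handle the error here.
        }
    }
*/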
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
{
    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* No RAM was allocated for the queue storage area, but pcHead cannot
        be set to NULL because NULL is used as a key to say the queue is used as
        a mutex.  Therefore just set pcHead to point to the queue as a benign
        value that is known to be within the memory map. */
        pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
    }
    else
    {
        /* Set the head to the start of the queue storage area. */
        pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
    }

    /* Initialise the queue members as described where the queue type is
    defined. */
    pxNewQueue->uxLength = uxQueueLength;
    pxNewQueue->uxItemSize = uxItemSize;
    ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

    #if ( configUSE_TRACE_FACILITY == 1 )
    {
        pxNewQueue->ucQueueType = ucQueueType;
    }
    #endif /* configUSE_TRACE_FACILITY */

    #if( configUSE_QUEUE_SETS == 1 )
    {
        pxNewQueue->pxQueueSetContainer = NULL;
    }
    #endif /* configUSE_QUEUE_SETS */

    traceQUEUE_CREATE( pxNewQueue );
}
/*-----------------------------------------------------------*/
#if( configUSE_MUTEXES == 1 )

    static void prvInitialiseMutex( Queue_t *pxNewQueue )
    {
        if( pxNewQueue != NULL )
        {
            /* The queue create function will set all the queue structure members
            correctly for a generic queue, but this function is creating a
            mutex.  Overwrite those members that need to be set differently -
            in particular the information required for priority inheritance. */
            pxNewQueue->pxMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* In case this is a recursive mutex. */
            pxNewQueue->u.uxRecursiveCallCount = 0;

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        pxNewQueue = ( Queue_t * ) xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
        prvInitialiseMutex( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
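/* Example usage sketch (assumes the semphr.h wrappers, which create mutexes
through xQueueCreateMutex(); the resource and function names are illustrative).
A mutex is taken around the shared resource and given back by the same task:

    static SemaphoreHandle_t xSpiMutex = NULL;

    void vInitSpiMutex( void )
    {
        xSpiMutex = xSemaphoreCreateMutex();
    }

    void vUseSpiBus( void )
    {
        if( xSemaphoreTake( xSpiMutex, pdMS_TO_TICKS( 10 ) ) == pdPASS )
        {
            // Access the protected resource here.
            xSemaphoreGive( xSpiMutex );
        }
    }
*/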
#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue )
    {
    Queue_t *pxNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        pxNewQueue = ( Queue_t * ) xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
        prvInitialiseMutex( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
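/* Example usage sketch (assumes INCLUDE_xSemaphoreGetMutexHolder == 1,
INCLUDE_xTaskGetCurrentTaskHandle == 1 and the semphr.h wrapper
xSemaphoreGetMutexHolder(); the function name is illustrative).  As the note
above explains, the result is only reliable when checking whether the CALLING
task is the holder:

    BaseType_t xCallerHoldsMutex( SemaphoreHandle_t xMutex )
    {
        return ( xSemaphoreGetMutexHolder( xMutex ) == xTaskGetCurrentTaskHandle() ) ? pdTRUE : pdFALSE;
    }
*/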
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        configASSERT( xSemaphore );

        /* Mutexes cannot be used in interrupt service routines, so the mutex
        holder should not change in an ISR, and therefore a critical section is
        not required here. */
        if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
        {
            pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
        }
        else
        {
            pxReturn = NULL;
        }

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then pxMutexHolder will not
        change outside of this task.  If this task does not hold the mutex then
        pxMutexHolder can never coincidentally equal the tasks handle, and as
        this is the only condition we are interested in it does not matter if
        pxMutexHolder is accessed simultaneously by another task.  Therefore no
        mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
            the task handle, therefore no underflow check is required.  Also,
            uxRecursiveCallCount is only modified by the mutex holder, and as
            there can only be one, no mutual exclusion is required to modify the
            uxRecursiveCallCount member. */
            ( pxMutex->u.uxRecursiveCallCount )--;

            /* Has the recursive call count unwound to 0? */
            if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
            holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
        xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
        {
            ( pxMutex->u.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );

            /* pdPASS will only be returned if the mutex was successfully
            obtained.  The calling task may have entered the Blocked state
            before reaching here. */
            if( xReturn != pdFAIL )
            {
                ( pxMutex->u.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
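/* Example usage sketch (assumes configUSE_RECURSIVE_MUTEXES == 1 and the
semphr.h recursive-mutex wrappers; the names are illustrative).  The same task
may take the mutex multiple times, and must give it back the same number of
times before it becomes available to other tasks:

    static SemaphoreHandle_t xLogMutex = NULL;

    void vInitLogMutex( void )
    {
        xLogMutex = xSemaphoreCreateRecursiveMutex();
    }

    void vWriteLogEntry( void )
    {
        if( xSemaphoreTakeRecursive( xLogMutex, pdMS_TO_TICKS( 100 ) ) == pdPASS )
        {
            // Possibly call other functions that also take xLogMutex here.
            xSemaphoreGiveRecursive( xLogMutex );
        }
    }
*/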
#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;
            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;
            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
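/* Example usage sketch (assumes configUSE_COUNTING_SEMAPHORES == 1 and the
semphr.h wrapper xSemaphoreCreateCounting(); the counts and names are
illustrative).  Because the count is simply uxMessagesWaiting, a counting
semaphore can track, for example, how many events are pending service:

    static SemaphoreHandle_t xEventCounter = NULL;

    void vInitEventCounter( void )
    {
        // Maximum count of 10, initial count of 0.
        xEventCounter = xSemaphoreCreateCounting( 10, 0 );
    }
*/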
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
            highest priority task wanting to access the queue.  If the head item
            in the queue is to be overwritten then it does not matter if the
            queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                        {
                            /* Do not notify the queue set as an existing item
                            was overwritten in the queue so the number of items
                            in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();
        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    }
}
/*-----------------------------------------------------------*/
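/* Example usage sketch (the task, queue and variable names are illustrative).
xQueueSend(), xQueueSendToBack(), xQueueSendToFront() and xQueueOverwrite()
are the queue.h macros that resolve to xQueueGenericSend() with the
appropriate xCopyPosition:

    void vProducerTask( void *pvParameters )
    {
    QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;
    uint32_t ulValue = 0;

        for( ;; )
        {
            ulValue++;

            // Block for up to 100ms if the queue is full.
            if( xQueueSend( xQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) != pdPASS )
            {
                // The queue remained full for 100ms - errQUEUE_FULL was returned.
            }
        }
    }
*/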
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
    /* Similar to xQueueGenericSend, except without blocking if there is no room
    in the queue.  Also don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
            semaphore or mutex.  That means prvCopyDataToQueue() cannot result
            in a task disinheriting a priority and prvCopyDataToQueue() can be
            called here even though the disinherit function does not check if
            the scheduler is suspended before accessing the ready lists. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
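/* Example usage sketch (the handler, driver call and queue names are
illustrative, and portYIELD_FROM_ISR() is the port-specific way of requesting
the context switch flagged through pxHigherPriorityTaskWoken - check the port
documentation for the exact macro on a given port):

    extern QueueHandle_t xRxQueue;

    void vRxInterruptHandler( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    uint8_t ucReceivedByte;

        ucReceivedByte = ucReadByteFromPeripheral();   // hypothetical driver call
        ( void ) xQueueSendFromISR( xRxQueue, &ucReceivedByte, &xHigherPriorityTaskWoken );

        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/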
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
    item size is 0.  Don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */

    configASSERT( pxQueue );

    /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
    if the item size is not 0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Normally a mutex would not be given from an interrupt, especially if
    there is a mutex holder, as priority inheritance makes no sense for an
    interrupt, only tasks. */
    configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* When the queue is used to implement a semaphore no data is ever
        moved through the queue but it is still valid to see if the queue 'has
        space'. */
        if( uxMessagesWaiting < pxQueue->uxLength )
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
            holder - and if there is a mutex holder then the mutex cannot be
            given from an ISR.  As this is the ISR version of the function it
            can be assumed there is no mutex holder and no need to determine if
            priority disinheritance is needed.  Simply increase the count of
            messages (semaphores) available. */
            pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
                        {
                            /* The semaphore is a member of a queue set, and
                            posting to the queue set caused a higher priority
                            task to unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
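/* Example usage sketch (handler and semaphore names are illustrative;
xSemaphoreGiveFromISR() is the semphr.h wrapper that ends up here, and
portYIELD_FROM_ISR() is port specific).  A typical deferred-processing pattern
gives a semaphore from the ISR so that a handler task unblocks:

    extern SemaphoreHandle_t xCaptureSemaphore;

    void vTimerCaptureISR( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        ( void ) xSemaphoreGiveFromISR( xCaptureSemaphore, &xHigherPriorityTaskWoken );
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/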
BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    /* Check the pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* The buffer into which data is received can only be NULL if the data size
    is zero (so no data is copied into the buffer). */
    configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* Cannot block if the scheduler is suspended. */
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Data available, remove one item. */
                prvCopyDataFromQueue( pxQueue, pvBuffer );
                traceQUEUE_RECEIVE( pxQueue );
                pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;

                /* There is now space in the queue, were any tasks waiting to
                post to the queue?  If so, unblock the highest priority waiting
                task. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                    configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* The timeout has not expired.  If the queue is still empty place
            the task on the list of tasks waiting to receive from the queue. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The queue contains data again.  Loop back to try and read the
                data. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* Timed out.  If there is no data in the queue exit, otherwise loop
            back and attempt to read the data. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceQUEUE_RECEIVE_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
}
/*-----------------------------------------------------------*/
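/* Example usage sketch (task and queue names are illustrative).  The
receiving task blocks until an item arrives or the timeout expires, in which
case errQUEUE_EMPTY is returned:

    void vConsumerTask( void *pvParameters )
    {
    QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;
    uint32_t ulReceivedValue;

        for( ;; )
        {
            if( xQueueReceive( xQueue, &ulReceivedValue, pdMS_TO_TICKS( 500 ) ) == pdPASS )
            {
                // Process ulReceivedValue here.
            }
        }
    }
*/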
BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

#if( configUSE_MUTEXES == 1 )
    BaseType_t xInheritanceOccurred = pdFALSE;
#endif

    /* Check the queue pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* Check this really is a semaphore, in which case the item size will be
    0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Cannot block if the scheduler is suspended. */
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Semaphores are queues with an item size of 0, and where the
            number of messages in the queue is the semaphore's count value. */
            const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if( uxSemaphoreCount > ( UBaseType_t ) 0 )
            {
                traceQUEUE_RECEIVE( pxQueue );

                /* Semaphores are queues with a data size of zero and where the
                messages waiting is the semaphore's count.  Reduce the count. */
                pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1;

                #if ( configUSE_MUTEXES == 1 )
                {
                    if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                    {
                        /* Record the information required to implement
                        priority inheritance should it become necessary. */
                        pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_MUTEXES */

                /* Check to see if other tasks are blocked waiting to give the
                semaphore, and if so, unblock the highest priority such task. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* For inheritance to have occurred there must have been an
                    initial timeout, and an adjusted timeout cannot become 0, as
                    if it were 0 the function would have exited. */
                    #if( configUSE_MUTEXES == 1 )
                    {
                        configASSERT( xInheritanceOccurred == pdFALSE );
                    }
                    #endif /* configUSE_MUTEXES */

                    /* The semaphore count was 0 and no block time is specified
                    (or the block time has expired) so exit now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The semaphore count was 0 and a block time was specified
                    so configure the timeout structure ready to block. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();
        /* Interrupts and other tasks can give to and take from the semaphore
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* A block time is specified and not expired.  If the semaphore
            count is 0 then enter the Blocked state to wait for a semaphore to
            become available.  As semaphores are implemented with queues the
            queue being empty is equivalent to the semaphore count being 0. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                #if ( configUSE_MUTEXES == 1 )
                {
                    if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                    {
                        taskENTER_CRITICAL();
                        {
                            xInheritanceOccurred = xTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                        }
                        taskEXIT_CRITICAL();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif

                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* There was no timeout and the semaphore count was not 0, so
                attempt to take the semaphore again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            /* If the semaphore count is 0 exit now as the timeout has
            expired.  Otherwise return to attempt to take the semaphore that is
            known to be available.  As semaphores are implemented by queues the
            queue being empty is equivalent to the semaphore count being 0. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                #if ( configUSE_MUTEXES == 1 )
                {
                    /* xInheritanceOccurred could only have been set if
                    pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
                    test the mutex type again to check it is actually a mutex. */
                    if( xInheritanceOccurred != pdFALSE )
                    {
                        taskENTER_CRITICAL();
                        {
                            UBaseType_t uxHighestWaitingPriority;

                            /* This task blocking on the mutex caused another
                            task to inherit this task's priority.  Now this task
                            has timed out the priority should be disinherited
                            again, but only as low as the next highest priority
                            task that is waiting for the same mutex. */
                            uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
                            vTaskPriorityDisinheritAfterTimeout( ( void * ) pxQueue->pxMutexHolder, uxHighestWaitingPriority );
                        }
                        taskEXIT_CRITICAL();
                    }
                }
                #endif /* configUSE_MUTEXES */

                traceQUEUE_RECEIVE_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
}
/*-----------------------------------------------------------*/
BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    /* Check the pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* The buffer into which data is received can only be NULL if the data size
    is zero (so no data is copied into the buffer). */
    configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* Cannot block if the scheduler is suspended. */
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
1640 taskENTER_CRITICAL();
\r
1642 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
\r
1644 /* Is there data in the queue now? To be running the calling task
\r
1645 must be the highest priority task wanting to access the queue. */
\r
1646 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
1648 /* Remember the read position so it can be reset after the data
\r
1649 is read from the queue as this function is only peeking the
\r
1650 data, not removing it. */
\r
1651 pcOriginalReadPosition = pxQueue->u.pcReadFrom;
\r
1653 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1654 traceQUEUE_PEEK( pxQueue );
\r
1656 /* The data is not being removed, so reset the read pointer. */
\r
1657 pxQueue->u.pcReadFrom = pcOriginalReadPosition;
\r
1659 /* The data is being left in the queue, so see if there are
\r
1660 any other tasks waiting for the data. */
\r
1661 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1663 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1665 /* The task waiting has a higher priority than this task. */
\r
1666 queueYIELD_IF_USING_PREEMPTION();
\r
1670 mtCOVERAGE_TEST_MARKER();
\r
1675 mtCOVERAGE_TEST_MARKER();
\r
1678 taskEXIT_CRITICAL();
\r
1683 if( xTicksToWait == ( TickType_t ) 0 )
\r
1685 /* The queue was empty and no block time is specified (or
\r
1686 the block time has expired) so leave now. */
\r
1687 taskEXIT_CRITICAL();
\r
1688 traceQUEUE_PEEK_FAILED( pxQueue );
\r
1689 return errQUEUE_EMPTY;
\r
1691 else if( xEntryTimeSet == pdFALSE )
\r
1693 /* The queue was empty and a block time was specified so
\r
1694 configure the timeout structure ready to enter the blocked
\r
1696 vTaskInternalSetTimeOutState( &xTimeOut );
\r
1697 xEntryTimeSet = pdTRUE;
\r
1701 /* Entry time was already set. */
\r
1702 mtCOVERAGE_TEST_MARKER();
\r
1706 taskEXIT_CRITICAL();
\r
1708 /* Interrupts and other tasks can send to and receive from the queue
\r
1709 now the critical section has been exited. */
\r
1711 vTaskSuspendAll();
\r
1712 prvLockQueue( pxQueue );
\r
1714 /* Update the timeout state to see if it has expired yet. */
\r
1715 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
1717 /* Timeout has not expired yet, check to see if there is data in the
\r
1718 queue now, and if not enter the Blocked state to wait for data. */
\r
1719 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
\r
1721 traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
\r
1722 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
\r
1723 prvUnlockQueue( pxQueue );
\r
1724 if( xTaskResumeAll() == pdFALSE )
\r
1726 portYIELD_WITHIN_API();
\r
1730 mtCOVERAGE_TEST_MARKER();
\r
1735 /* There is data in the queue now, so don't enter the blocked
\r
1736 state, instead return to try and obtain the data. */
\r
1737 prvUnlockQueue( pxQueue );
\r
1738 ( void ) xTaskResumeAll();
\r
1743 /* The timeout has expired. If there is still no data in the queue
\r
1744 exit, otherwise go back and try to read the data again. */
\r
1745 prvUnlockQueue( pxQueue );
\r
1746 ( void ) xTaskResumeAll();
\r
1748 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
\r
1750 traceQUEUE_PEEK_FAILED( pxQueue );
\r
1751 return errQUEUE_EMPTY;
\r
1755 mtCOVERAGE_TEST_MARKER();
\r
1760 /*-----------------------------------------------------------*/
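/* Illustrative usage sketch only - not part of the kernel source.  Peeking
copies the item at the head of the queue into the caller's buffer without
removing it, so a later receive returns the same item.  The sketch below waits
up to 100ms for an item to arrive; the queue handle, item type and helper
function are hypothetical application-side names.

    typedef struct { uint32_t ulCode; } Message_t;
    QueueHandle_t xExampleQueue = xQueueCreate( 10, sizeof( Message_t ) );
    Message_t xMessage;

    if( xQueuePeek( xExampleQueue, &xMessage, pdMS_TO_TICKS( 100 ) ) == pdPASS )
    {
        vProcessMessageHeader( &xMessage );
    }
*/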
\r
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* Cannot block in an ISR, so check there is data available. */
        if( uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            const int8_t cRxLock = pxQueue->cRxLock;

            traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

            prvCopyDataFromQueue( pxQueue, pvBuffer );
            pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;

            /* If the queue is locked the event list will not be modified.
            Instead update the lock count so the task that unlocks the queue
            will know that an ISR has removed data while the queue was
            locked. */
            if( cRxLock == queueUNLOCKED )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority than us so
                        force a context switch. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was removed while it was locked. */
                pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
            }

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
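/* Illustrative usage sketch only - not part of the kernel source.  The
pxHigherPriorityTaskWoken parameter is set to pdTRUE if receiving from the
queue unblocks a task with a priority above that of the interrupted task, in
which case a context switch should be requested before the ISR exits.  The
handler name, queue handle and output function below are hypothetical.

    void vExampleRxInterruptHandler( void )
    {
    char cRxedChar;
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        while( xQueueReceiveFromISR( xCharsToTx, &cRxedChar, &xHigherPriorityTaskWoken ) == pdPASS )
        {
            vOutputCharacter( cRxedChar );
        }

        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/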
\r
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_PEEK_FROM_ISR( pxQueue );

            /* Remember the read position so it can be reset as nothing is
            actually being removed from the queue. */
            pcOriginalReadPosition = pxQueue->u.pcReadFrom;
            prvCopyDataFromQueue( pxQueue, pvBuffer );
            pxQueue->u.pcReadFrom = pcOriginalReadPosition;

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
\r
UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t *pxQueue;

    pxQueue = ( Queue_t * ) xQueue;
    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/
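/* Illustrative note - not part of the kernel source.  Both
uxQueueMessagesWaiting() and uxQueueSpacesAvailable() return a snapshot taken
inside a critical section; the queue state can change again as soon as that
critical section is exited, so the values are advisory only.  The handle and
helper function below are hypothetical.

    if( uxQueueSpacesAvailable( xExampleQueue ) == ( UBaseType_t ) 0 )
    {
        vHandleQueueCurrentlyFull();
    }
*/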
\r
UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    traceQUEUE_DELETE( pxQueue );

    #if ( configQUEUE_REGISTRY_SIZE > 0 )
    {
        vQueueUnregisterQueue( pxQueue );
    }
    #endif

    #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
    {
        /* The queue can only have been allocated dynamically - free it
        again. */
        vPortFree( pxQueue );
    }
    #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
    {
        /* The queue could have been allocated statically or dynamically, so
        check before attempting to free the memory. */
        if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
        {
            vPortFree( pxQueue );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #else
    {
        /* The queue must have been statically allocated, so is not going to be
        deleted.  Avoid compiler warnings about the unused parameter. */
        ( void ) pxQueue;
    }
    #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
/*-----------------------------------------------------------*/
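/* Illustrative usage sketch only - not part of the kernel source.  A queue
created with dynamic allocation is freed by vQueueDelete(); a statically
created queue is only unregistered, as the application owns its buffers.  No
task or ISR may use the handle after deletion.  Names below are hypothetical.

    QueueHandle_t xExampleQueue = xQueueCreate( 5, sizeof( uint32_t ) );

    if( xExampleQueue != NULL )
    {
        vUseExampleQueue( xExampleQueue );
        vQueueDelete( xExampleQueue );
    }
*/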
\r
#if ( configUSE_TRACE_FACILITY == 1 )

    UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
    {
        ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->ucQueueType;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if( configUSE_MUTEXES == 1 )

    static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
    {
    UBaseType_t uxHighestPriorityOfWaitingTasks;

        /* If a task waiting for a mutex causes the mutex holder to inherit a
        priority, but the waiting task times out, then the holder should
        disinherit the priority - but only down to the highest priority of any
        other tasks that are waiting for the same mutex.  For this purpose,
        return the priority of the highest priority task that is waiting for the
        mutex. */
        if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0 )
        {
            uxHighestPriorityOfWaitingTasks = configMAX_PRIORITIES - listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) );
        }
        else
        {
            uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
        }

        return uxHighestPriorityOfWaitingTasks;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
\r
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;
UBaseType_t uxMessagesWaiting;

    /* This function is called from a critical section. */

    uxMessagesWaiting = pxQueue->uxMessagesWaiting;

    if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
    {
        #if ( configUSE_MUTEXES == 1 )
        {
            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                /* The mutex is no longer being held. */
                xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
                pxQueue->pxMutexHolder = NULL;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configUSE_MUTEXES */
    }
    else if( xPosition == queueSEND_TO_BACK )
    {
        ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
        pxQueue->pcWriteTo += pxQueue->uxItemSize;
        if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
        {
            pxQueue->pcWriteTo = pxQueue->pcHead;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    else
    {
        ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
        pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
        if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
        {
            pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        if( xPosition == queueOVERWRITE )
        {
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* An item is not being added but overwritten, so subtract
                one from the recorded number of items in the queue so when
                one is added again below the number of recorded items remains
                correct. */
                --uxMessagesWaiting;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

    pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;

    return xReturn;
}
/*-----------------------------------------------------------*/

static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
    if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
    {
        pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
        if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
        {
            pxQueue->u.pcReadFrom = pxQueue->pcHead;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
        ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
    }
}
/*-----------------------------------------------------------*/
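/* Illustrative note - not part of the kernel source.  The two functions above
implement the queue's copy-by-value semantics: an item is memcpy()'d into the
queue storage area when sent and memcpy()'d back out when received, so the
sender's variable can safely be reused or go out of scope immediately after
the send returns.  A sketch, using hypothetical application names:

    typedef struct { uint8_t ucId; uint32_t ulPayload; } Event_t;

    Event_t xEventToSend = { 1, 0x12345678UL };
    Event_t xReceivedEvent;

    ( void ) xQueueSend( xEventQueue, &xEventToSend, 0 );
    ( void ) xQueueReceive( xEventQueue, &xReceivedEvent, portMAX_DELAY );
*/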
\r
static void prvUnlockQueue( Queue_t * const pxQueue )
{
    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

    /* The lock counts contain the number of extra data items placed or
    removed from the queue while the queue was locked.  When a queue is
    locked items can be added or removed, but the event lists cannot be
    updated. */
    taskENTER_CRITICAL();
    {
        int8_t cTxLock = pxQueue->cTxLock;

        /* See if data was added to the queue while it was locked. */
        while( cTxLock > queueLOCKED_UNMODIFIED )
        {
            /* Data was posted while the queue was locked.  Are any tasks
            blocked waiting for data to become available? */
            #if ( configUSE_QUEUE_SETS == 1 )
            {
                if( pxQueue->pxQueueSetContainer != NULL )
                {
                    if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
                    {
                        /* The queue is a member of a queue set, and posting to
                        the queue set caused a higher priority task to unblock.
                        A context switch is required. */
                        vTaskMissedYield();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    /* Tasks that are removed from the event list will get
                    added to the pending ready list as the scheduler is still
                    suspended. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            vTaskMissedYield();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        break;
                    }
                }
            }
            #else /* configUSE_QUEUE_SETS */
            {
                /* Tasks that are removed from the event list will get added to
                the pending ready list as the scheduler is still suspended. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority so record that
                        a context switch is required. */
                        vTaskMissedYield();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    break;
                }
            }
            #endif /* configUSE_QUEUE_SETS */

            --cTxLock;
        }

        pxQueue->cTxLock = queueUNLOCKED;
    }
    taskEXIT_CRITICAL();

    /* Do the same for the Rx lock. */
    taskENTER_CRITICAL();
    {
        int8_t cRxLock = pxQueue->cRxLock;

        while( cRxLock > queueLOCKED_UNMODIFIED )
        {
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                {
                    vTaskMissedYield();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                --cRxLock;
            }
            else
            {
                break;
            }
        }

        pxQueue->cRxLock = queueUNLOCKED;
    }
    taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/
\r
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;

    taskENTER_CRITICAL();
    {
        if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
        {
            xReturn = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

    configASSERT( xQueue );
    if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
    {
        xReturn = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;

    taskENTER_CRITICAL();
    {
        if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
        {
            xReturn = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

    configASSERT( xQueue );
    if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
    {
        xReturn = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_CO_ROUTINES == 1 )

    BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        /* If the queue is already full we may have to block.  A critical section
        is required to prevent an interrupt removing something from the queue
        between the check to see if the queue is full and blocking on the queue. */
        portDISABLE_INTERRUPTS();
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                /* The queue is full - do we want to block or just leave without
                posting? */
                if( xTicksToWait > ( TickType_t ) 0 )
                {
                    /* As this is called from a coroutine we cannot block directly, but
                    return indicating that we need to block. */
                    vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
                    portENABLE_INTERRUPTS();
                    return errQUEUE_BLOCKED;
                }
                else
                {
                    portENABLE_INTERRUPTS();
                    return errQUEUE_FULL;
                }
            }
        }
        portENABLE_INTERRUPTS();

        portDISABLE_INTERRUPTS();
        {
            if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
            {
                /* There is room in the queue, copy the data into the queue. */
                prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
                xReturn = pdPASS;

                /* Were any co-routines waiting for data to become available? */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    /* In this instance the co-routine could be placed directly
                    into the ready list as we are within a critical section.
                    Instead the same pending ready list mechanism is used as if
                    the event were caused from within an interrupt. */
                    if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        /* The co-routine waiting has a higher priority so record
                        that a yield might be appropriate. */
                        xReturn = errQUEUE_YIELD;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                xReturn = errQUEUE_FULL;
            }
        }
        portENABLE_INTERRUPTS();

        return xReturn;
    }

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_CO_ROUTINES == 1 )

    BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        /* If the queue is already empty we may have to block.  A critical section
        is required to prevent an interrupt adding something to the queue
        between the check to see if the queue is empty and blocking on the queue. */
        portDISABLE_INTERRUPTS();
        {
            if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
            {
                /* There are no messages in the queue, do we want to block or just
                leave with nothing? */
                if( xTicksToWait > ( TickType_t ) 0 )
                {
                    /* As this is a co-routine we cannot block directly, but return
                    indicating that we need to block. */
                    vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
                    portENABLE_INTERRUPTS();
                    return errQUEUE_BLOCKED;
                }
                else
                {
                    portENABLE_INTERRUPTS();
                    return errQUEUE_FULL;
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        portENABLE_INTERRUPTS();

        portDISABLE_INTERRUPTS();
        {
            if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Data is available from the queue. */
                pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
                if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
                {
                    pxQueue->u.pcReadFrom = pxQueue->pcHead;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
                --( pxQueue->uxMessagesWaiting );
                ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

                xReturn = pdPASS;

                /* Were any co-routines waiting for space to become available? */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    /* In this instance the co-routine could be placed directly
                    into the ready list as we are within a critical section.
                    Instead the same pending ready list mechanism is used as if
                    the event were caused from within an interrupt. */
                    if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        xReturn = errQUEUE_YIELD;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
        portENABLE_INTERRUPTS();

        return xReturn;
    }

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_CO_ROUTINES == 1 )

    BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
    {
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        /* Cannot block within an ISR so if there is no space on the queue then
        exit without doing anything. */
        if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
        {
            prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

            /* We only want to wake one co-routine per ISR, so check that a
            co-routine has not already been woken. */
            if( xCoRoutinePreviouslyWoken == pdFALSE )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        return pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xCoRoutinePreviouslyWoken;
    }

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

    BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
    {
    BaseType_t xReturn;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        /* We cannot block from an ISR, so check there is data available.  If
        not then just leave without doing anything. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            /* Copy the data from the queue. */
            pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
            if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
            {
                pxQueue->u.pcReadFrom = pxQueue->pcHead;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
            --( pxQueue->uxMessagesWaiting );
            ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

            if( ( *pxCoRoutineWoken ) == pdFALSE )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        *pxCoRoutineWoken = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
        }

        return xReturn;
    }

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
\r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
    {
    UBaseType_t ux;

        /* See if there is an empty space in the registry.  A NULL name denotes
        a free slot. */
        for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
        {
            if( xQueueRegistry[ ux ].pcQueueName == NULL )
            {
                /* Store the information on this queue. */
                xQueueRegistry[ ux ].pcQueueName = pcQueueName;
                xQueueRegistry[ ux ].xHandle = xQueue;

                traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
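/* Illustrative usage sketch only - not part of the kernel source.  The
registry is purely a debug aid: it gives queues and semaphores human readable
names that kernel aware debuggers can display, and has no effect on
scheduling.  Only the name pointer is stored, so the string must remain valid
for as long as the entry exists (a string literal is typical).  Names below
are hypothetical.

    QueueHandle_t xTxQueue = xQueueCreate( 16, sizeof( char ) );

    vQueueAddToRegistry( xTxQueue, "TxQueue" );

    vQueueUnregisterQueue( xTxQueue );
*/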
\r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
    {
    UBaseType_t ux;
    const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

        /* Note there is nothing here to protect against another task adding or
        removing entries from the registry while it is being searched. */
        for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
        {
            if( xQueueRegistry[ ux ].xHandle == xQueue )
            {
                pcReturn = xQueueRegistry[ ux ].pcQueueName;
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        return pcReturn;
    } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configQUEUE_REGISTRY_SIZE > 0 )

    void vQueueUnregisterQueue( QueueHandle_t xQueue )
    {
    UBaseType_t ux;

        /* See if the handle of the queue being unregistered is actually in the
        registry. */
        for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
        {
            if( xQueueRegistry[ ux ].xHandle == xQueue )
            {
                /* Set the name to NULL to show that this slot is free again. */
                xQueueRegistry[ ux ].pcQueueName = NULL;

                /* Set the handle to NULL to ensure the same queue handle cannot
                appear in the registry twice if it is added, removed, then
                added again. */
                xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

    } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_TIMERS == 1 )

    void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
    {
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        /* This function should not be called by application code hence the
        'Restricted' in its name.  It is not part of the public API.  It is
        designed for use by kernel code, and has special calling requirements.
        It can result in vListInsert() being called on a list that can only
        possibly ever have one item in it, so the list will be fast, but even
        so it should be called with the scheduler locked and not from a critical
        section. */

        /* Only do anything if there are no messages in the queue.  This function
        will not actually cause the task to block, just place it on a blocked
        list.  It will not block until the scheduler is unlocked - at which
        time a yield will be performed.  If an item is added to the queue while
        the queue is locked, and the calling task blocks on the queue, then the
        calling task will be immediately unblocked when the queue is unlocked. */
        prvLockQueue( pxQueue );
        if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
        {
            /* There is nothing in the queue, block for the specified period. */
            vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
        prvUnlockQueue( pxQueue );
    }

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/
\r
#if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
    {
    QueueSetHandle_t pxQueue;

        pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

        return pxQueue;
    }

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

    BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
    {
    BaseType_t xReturn;

        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
            {
                /* Cannot add a queue/semaphore to more than one queue set. */
                xReturn = pdFAIL;
            }
            else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
            {
                /* Cannot add a queue/semaphore to a queue set if there are already
                items in the queue/semaphore. */
                xReturn = pdFAIL;
            }
            else
            {
                ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
                xReturn = pdPASS;
            }
        }
        taskEXIT_CRITICAL();

        return xReturn;
    }

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

    BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
    {
    BaseType_t xReturn;
    Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

        if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
        {
            /* The queue was not a member of the set. */
            xReturn = pdFAIL;
        }
        else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
        {
            /* It is dangerous to remove a queue from a set when the queue is
            not empty because the queue set will still hold pending events for
            the queue. */
            xReturn = pdFAIL;
        }
        else
        {
            taskENTER_CRITICAL();
            {
                /* The queue is no longer contained in the set. */
                pxQueueOrSemaphore->pxQueueSetContainer = NULL;
            }
            taskEXIT_CRITICAL();
            xReturn = pdPASS;
        }

        return xReturn;
    } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

    QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
    {
    QueueSetMemberHandle_t xReturn = NULL;

        ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */
        return xReturn;
    }

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
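/* Illustrative usage sketch only - not part of the kernel source.  A queue
set lets a single task block on several queues and/or semaphores at once.
Members must be empty when they are added, and the set must be long enough to
hold one handle for every item that can be pending across all members.  Names
below are hypothetical.

    QueueHandle_t xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
    QueueHandle_t xQueue2 = xQueueCreate( 10, sizeof( uint32_t ) );
    QueueSetHandle_t xSet = xQueueCreateSet( 10 + 10 );
    QueueSetMemberHandle_t xActivated;
    uint32_t ulReceived;

    ( void ) xQueueAddToSet( xQueue1, xSet );
    ( void ) xQueueAddToSet( xQueue2, xSet );

    xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );

    if( xActivated == ( QueueSetMemberHandle_t ) xQueue1 )
    {
        ( void ) xQueueReceive( xQueue1, &ulReceived, 0 );
    }
*/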
\r
#if ( configUSE_QUEUE_SETS == 1 )

    QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
    {
    QueueSetMemberHandle_t xReturn = NULL;

        ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
        return xReturn;
    }

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
    {
    Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
    BaseType_t xReturn = pdFALSE;

        /* This function must be called from a critical section. */

        configASSERT( pxQueueSetContainer );
        configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

        if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
        {
            const int8_t cTxLock = pxQueueSetContainer->cTxLock;

            traceQUEUE_SEND( pxQueueSetContainer );

            /* The data copied is the handle of the queue that contains data. */
            xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

            if( cTxLock == queueUNLOCKED )
            {
                if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority. */
                        xReturn = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xReturn;
    }

#endif /* configUSE_QUEUE_SETS */
\r