/*
    FreeRTOS V8.1.0 - Copyright (C) 2014 Real Time Engineers Ltd.

    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

    ***************************************************************************
     *    FreeRTOS provides completely free yet professionally developed,    *
     *    robust, strictly quality controlled, supported, and cross          *
     *    platform software that has become a de facto standard.             *
     *                                                                       *
     *    Help yourself get started quickly and support the FreeRTOS         *
     *    project by purchasing a FreeRTOS tutorial book, reference          *
     *    manual, or both from: http://www.FreeRTOS.org/Documentation        *
    ***************************************************************************

    This file is part of the FreeRTOS distribution.

    FreeRTOS is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License (version 2) as published by the
    Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.

    >>! NOTE: The modification to the GPL is included to allow you to     !<<
    >>! distribute a combined work that includes FreeRTOS without being   !<<
    >>! obliged to provide the source code for proprietary components     !<<
    >>! outside of the FreeRTOS kernel.                                   !<<

    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE.  Full license text is available from the following
    link: http://www.freertos.org/a00114.html

    ***************************************************************************
     *    Having a problem?  Start by reading the FAQ "My application does   *
     *    not run, what could be wrong?"                                     *
     *                                                                       *
     *    http://www.FreeRTOS.org/FAQHelp.html                               *
    ***************************************************************************

    http://www.FreeRTOS.org - Documentation, books, training, latest versions,
    license and Real Time Engineers Ltd. contact details.

    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
    compatible FAT file system, and our tiny thread aware UDP/IP stack.

    http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High
    Integrity Systems to sell under the OpenRTOS brand.  Low cost OpenRTOS
    licenses offer ticketed support, indemnification and middleware.

    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
    engineered and independently SIL3 certified version for use in safety and
    mission critical applications that require provable dependability.
*/
/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include <string.h>

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED                    ( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED           ( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder                    pcTail
#define uxQueueType                      pcHead
#define queueQUEUE_IS_MUTEX              NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME       ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.
 */
typedef struct QueueDefinition
{
    int8_t *pcHead;                 /*< Points to the beginning of the queue storage area. */
    int8_t *pcTail;                 /*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items, and this is used as a marker. */
    int8_t *pcWriteTo;              /*< Points to the next free place in the storage area. */

    union                           /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
    {
        int8_t *pcReadFrom;         /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
        UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
    } u;

    List_t xTasksWaitingToSend;     /*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;  /*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
    UBaseType_t uxLength;           /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;         /*< The size of each item that the queue will hold. */

    volatile BaseType_t xRxLock;    /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile BaseType_t xTxLock;    /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition *pxQueueSetContainer;
    #endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/
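/* Example usage (illustrative sketch, not part of the kernel implementation):
because items are queued by copy, not by reference, the sender's variable can
be reused as soon as the send completes.  The type and function names below
are hypothetical application code.

    typedef struct { uint32_t ulValue; } Message_t;

    void vHypotheticalSender( QueueHandle_t xQueue )
    {
    Message_t xMessage;

        xMessage.ulValue = 123;

        // The queue stores its own copy of xMessage, so the local variable
        // can be modified again immediately after the call returns.
        ( void ) xQueueSend( xQueue, &xMessage, ( TickType_t ) 0 );
    }
*/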
\r
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to the
    new QueueRegistryItem_t name below to enable the use of older kernel aware
    debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
    /*
     * Checks to see if a queue is a member of a queue set, and if so, notifies
     * the queue set that the queue contains data.
     */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                             \
    taskENTER_CRITICAL();                                   \
    {                                                       \
        if( ( pxQueue )->xRxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
        if( ( pxQueue )->xTxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
    }                                                       \
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
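/* Illustrative sketch (not additional functionality, just the pattern): the
blocking paths later in this file pair prvLockQueue() with prvUnlockQueue()
while the scheduler is suspended, so that ISRs can still post to or read from
the queue but cannot manipulate the queue's event lists.

    vTaskSuspendAll();
    prvLockQueue( pxQueue );

    // Interrogate the queue and, if necessary, place the calling task on
    // one of the queue's event lists here...

    prvUnlockQueue( pxQueue );
    ( void ) xTaskResumeAll();
*/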
\r
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
        pxQueue->xRxLock = queueUNLOCKED;
        pxQueue->xTxLock = queueUNLOCKED;

        if( xNewQueue == pdFALSE )
        {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                {
                    queueYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Ensure the event queues start in the correct state. */
            vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
/*-----------------------------------------------------------*/
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
QueueHandle_t xReturn = NULL;

    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    /* Allocate the new queue structure. */
    if( uxQueueLength > ( UBaseType_t ) 0 )
    {
        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
        if( pxNewQueue != NULL )
        {
            /* Create the list of pointers to queue items.  The queue is one byte
            longer than asked for to make wrap checking easier/faster. */
            xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

            pxNewQueue->pcHead = ( int8_t * ) pvPortMalloc( xQueueSizeInBytes );
            if( pxNewQueue->pcHead != NULL )
            {
                /* Initialise the queue members as described above where the
                queue type is defined. */
                pxNewQueue->uxLength = uxQueueLength;
                pxNewQueue->uxItemSize = uxItemSize;
                ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

                #if ( configUSE_TRACE_FACILITY == 1 )
                {
                    pxNewQueue->ucQueueType = ucQueueType;
                }
                #endif /* configUSE_TRACE_FACILITY */

                #if( configUSE_QUEUE_SETS == 1 )
                {
                    pxNewQueue->pxQueueSetContainer = NULL;
                }
                #endif /* configUSE_QUEUE_SETS */

                traceQUEUE_CREATE( pxNewQueue );
                xReturn = pxNewQueue;
            }
            else
            {
                traceQUEUE_CREATE_FAILED( ucQueueType );
                vPortFree( pxNewQueue );
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    configASSERT( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
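/* Example usage of xQueueGenericCreate() (illustrative sketch, not part of the
kernel): application code normally reaches this function through the
xQueueCreate() macro defined in queue.h.  The queue name below is hypothetical.

    QueueHandle_t xUartRxQueue;

    // A queue that can hold 16 items, each large enough for one uint8_t.
    xUartRxQueue = xQueueCreate( 16, sizeof( uint8_t ) );

    if( xUartRxQueue == NULL )
    {
        // The queue could not be created because there was not enough
        // FreeRTOS heap available for the Queue_t structure plus storage area.
    }
*/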
\r
#if ( configUSE_MUTEXES == 1 )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        /* Allocate the new queue structure. */
        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
        if( pxNewQueue != NULL )
        {
            /* Information required for priority inheritance. */
            pxNewQueue->pxMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* When a queue is used as a mutex no data is actually copied into
            or out of the queue. */
            pxNewQueue->pcWriteTo = NULL;
            pxNewQueue->u.pcReadFrom = NULL;

            /* Each mutex has a length of 1 (like a binary semaphore) and
            an item size of 0 as nothing is actually copied into or out
            of the mutex. */
            pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
            pxNewQueue->uxLength = ( UBaseType_t ) 1U;
            pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
            pxNewQueue->xRxLock = queueUNLOCKED;
            pxNewQueue->xTxLock = queueUNLOCKED;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                pxNewQueue->ucQueueType = ucQueueType;
            }
            #endif

            #if ( configUSE_QUEUE_SETS == 1 )
            {
                pxNewQueue->pxQueueSetContainer = NULL;
            }
            #endif

            /* Ensure the event queues start with the correct state. */
            vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }

        configASSERT( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
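/* Example usage of xQueueCreateMutex() (illustrative sketch, not part of the
kernel): application code normally reaches this function through the
xSemaphoreCreateMutex() macro in semphr.h, then takes and gives the mutex with
xSemaphoreTake() and xSemaphoreGive().

    SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();

    if( xMutex != NULL )
    {
        // Wait up to 10 ticks for the mutex to become available.
        if( xSemaphoreTake( xMutex, ( TickType_t ) 10 ) == pdTRUE )
        {
            // Access the resource protected by the mutex here, then...
            xSemaphoreGive( xMutex );
        }
    }
*/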
\r
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    }

#endif
/*-----------------------------------------------------------*/
469 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
\r
471 BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
\r
473 BaseType_t xReturn;
\r
474 Queue_t * const pxMutex = ( Queue_t * ) xMutex;
\r
476 configASSERT( pxMutex );
\r
478 /* If this is the task that holds the mutex then pxMutexHolder will not
\r
479 change outside of this task. If this task does not hold the mutex then
\r
480 pxMutexHolder can never coincidentally equal the tasks handle, and as
\r
481 this is the only condition we are interested in it does not matter if
\r
482 pxMutexHolder is accessed simultaneously by another task. Therefore no
\r
483 mutual exclusion is required to test the pxMutexHolder variable. */
\r
484 if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
\r
486 traceGIVE_MUTEX_RECURSIVE( pxMutex );
\r
488 /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
\r
489 the task handle, therefore no underflow check is required. Also,
\r
490 uxRecursiveCallCount is only modified by the mutex holder, and as
\r
491 there can only be one, no mutual exclusion is required to modify the
\r
492 uxRecursiveCallCount member. */
\r
493 ( pxMutex->u.uxRecursiveCallCount )--;
\r
495 /* Have we unwound the call count? */
\r
496 if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
\r
498 /* Return the mutex. This will automatically unblock any other
\r
499 task that might be waiting to access the mutex. */
\r
500 ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
\r
504 mtCOVERAGE_TEST_MARKER();
\r
/* The mutex cannot be given because the calling task is not the
holder. */
\r
515 traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
\r
521 #endif /* configUSE_RECURSIVE_MUTEXES */
\r
522 /*-----------------------------------------------------------*/
\r
524 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
\r
526 BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
\r
528 BaseType_t xReturn;
\r
529 Queue_t * const pxMutex = ( Queue_t * ) xMutex;
\r
531 configASSERT( pxMutex );
\r
533 /* Comments regarding mutual exclusion as per those within
\r
534 xQueueGiveMutexRecursive(). */
\r
536 traceTAKE_MUTEX_RECURSIVE( pxMutex );
\r
538 if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
\r
540 ( pxMutex->u.uxRecursiveCallCount )++;
\r
545 xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );
\r
547 /* pdPASS will only be returned if the mutex was successfully
\r
548 obtained. The calling task may have entered the Blocked state
\r
549 before reaching here. */
\r
550 if( xReturn == pdPASS )
\r
552 ( pxMutex->u.uxRecursiveCallCount )++;
\r
556 traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
\r
563 #endif /* configUSE_RECURSIVE_MUTEXES */
\r
564 /*-----------------------------------------------------------*/
\r
#if ( configUSE_COUNTING_SEMAPHORES == 1 )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        configASSERT( xHandle );

        return xHandle;
    }

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/
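/* Example usage of xQueueCreateCountingSemaphore() (illustrative sketch, not
part of the kernel): application code normally reaches this function through
the xSemaphoreCreateCounting() macro in semphr.h.  Here the count
hypothetically represents the number of items available in a resource pool.

    SemaphoreHandle_t xPoolSemaphore;

    // A maximum count of 5, with all 5 "resources" initially available.
    xPoolSemaphore = xSemaphoreCreateCounting( 5, 5 );
*/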
\r
595 BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
\r
597 BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
\r
598 TimeOut_t xTimeOut;
\r
599 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
601 configASSERT( pxQueue );
\r
602 configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
603 configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
\r
604 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
\r
606 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
\r
611 /* This function relaxes the coding standard somewhat to allow return
\r
612 statements within the function itself. This is done in the interest
\r
613 of execution time efficiency. */
\r
616 taskENTER_CRITICAL();
\r
618 /* Is there room on the queue now? The running task must be
\r
619 the highest priority task wanting to access the queue. If
\r
620 the head item in the queue is to be overwritten then it does
\r
621 not matter if the queue is full. */
\r
622 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
\r
624 traceQUEUE_SEND( pxQueue );
\r
625 xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
\r
627 #if ( configUSE_QUEUE_SETS == 1 )
\r
629 if( pxQueue->pxQueueSetContainer != NULL )
\r
631 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
\r
633 /* The queue is a member of a queue set, and posting
\r
634 to the queue set caused a higher priority task to
\r
635 unblock. A context switch is required. */
\r
636 queueYIELD_IF_USING_PREEMPTION();
\r
640 mtCOVERAGE_TEST_MARKER();
\r
645 /* If there was a task waiting for data to arrive on the
\r
646 queue then unblock it now. */
\r
647 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
649 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
\r
651 /* The unblocked task has a priority higher than
\r
652 our own so yield immediately. Yes it is ok to
\r
653 do this from within the critical section - the
\r
654 kernel takes care of that. */
\r
655 queueYIELD_IF_USING_PREEMPTION();
\r
659 mtCOVERAGE_TEST_MARKER();
\r
662 else if( xYieldRequired != pdFALSE )
\r
664 /* This path is a special case that will only get
\r
665 executed if the task was holding multiple mutexes
\r
666 and the mutexes were given back in an order that is
\r
667 different to that in which they were taken. */
\r
668 queueYIELD_IF_USING_PREEMPTION();
\r
672 mtCOVERAGE_TEST_MARKER();
\r
676 #else /* configUSE_QUEUE_SETS */
\r
678 /* If there was a task waiting for data to arrive on the
\r
679 queue then unblock it now. */
\r
680 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
682 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
\r
684 /* The unblocked task has a priority higher than
\r
685 our own so yield immediately. Yes it is ok to do
\r
686 this from within the critical section - the kernel
\r
687 takes care of that. */
\r
688 queueYIELD_IF_USING_PREEMPTION();
\r
692 mtCOVERAGE_TEST_MARKER();
\r
695 else if( xYieldRequired != pdFALSE )
\r
697 /* This path is a special case that will only get
\r
698 executed if the task was holding multiple mutexes and
\r
699 the mutexes were given back in an order that is
\r
700 different to that in which they were taken. */
\r
701 queueYIELD_IF_USING_PREEMPTION();
\r
705 mtCOVERAGE_TEST_MARKER();
\r
708 #endif /* configUSE_QUEUE_SETS */
\r
710 taskEXIT_CRITICAL();
\r
715 if( xTicksToWait == ( TickType_t ) 0 )
\r
717 /* The queue was full and no block time is specified (or
\r
718 the block time has expired) so leave now. */
\r
719 taskEXIT_CRITICAL();
\r
/* Return to the original privilege level before exiting
the function. */
\r
723 traceQUEUE_SEND_FAILED( pxQueue );
\r
724 return errQUEUE_FULL;
\r
726 else if( xEntryTimeSet == pdFALSE )
\r
728 /* The queue was full and a block time was specified so
\r
729 configure the timeout structure. */
\r
730 vTaskSetTimeOutState( &xTimeOut );
\r
731 xEntryTimeSet = pdTRUE;
\r
735 /* Entry time was already set. */
\r
736 mtCOVERAGE_TEST_MARKER();
\r
740 taskEXIT_CRITICAL();
\r
/* Interrupts and other tasks can send to and receive from the queue
now the critical section has been exited. */

vTaskSuspendAll();
prvLockQueue( pxQueue );
\r
748 /* Update the timeout state to see if it has expired yet. */
\r
749 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
751 if( prvIsQueueFull( pxQueue ) != pdFALSE )
\r
753 traceBLOCKING_ON_QUEUE_SEND( pxQueue );
\r
754 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
\r
/* Unlocking the queue means queue events can affect the
event list.  It is possible that interrupts occurring now
remove this task from the event list again - but as the
scheduler is suspended the task will go onto the pending
ready list instead of the actual ready list. */
\r
761 prvUnlockQueue( pxQueue );
\r
763 /* Resuming the scheduler will move tasks from the pending
\r
764 ready list into the ready list - so it is feasible that this
\r
765 task is already in a ready list before it yields - in which
\r
766 case the yield will not cause a context switch unless there
\r
767 is also a higher priority task in the pending ready list. */
\r
768 if( xTaskResumeAll() == pdFALSE )
\r
770 portYIELD_WITHIN_API();
\r
776 prvUnlockQueue( pxQueue );
\r
777 ( void ) xTaskResumeAll();
\r
782 /* The timeout has expired. */
\r
783 prvUnlockQueue( pxQueue );
\r
784 ( void ) xTaskResumeAll();
\r
/* Return to the original privilege level before exiting the
function. */
\r
788 traceQUEUE_SEND_FAILED( pxQueue );
\r
789 return errQUEUE_FULL;
\r
/*-----------------------------------------------------------*/
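/* Example usage of xQueueGenericSend() (illustrative sketch, not part of the
kernel): application code normally calls this function through the
xQueueSend(), xQueueSendToBack(), xQueueSendToFront() or xQueueOverwrite()
macros in queue.h.  The queue handle and variable names below are hypothetical.

    uint32_t ulReading = 42;

    // Block for up to 100 ticks for space to become available on the queue.
    if( xQueueSend( xSensorQueue, &ulReading, ( TickType_t ) 100 ) != pdPASS )
    {
        // errQUEUE_FULL was returned - the queue remained full for the
        // whole block time.
    }
*/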
\r
795 #if ( configUSE_ALTERNATIVE_API == 1 )
\r
797 BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
\r
799 BaseType_t xEntryTimeSet = pdFALSE;
\r
800 TimeOut_t xTimeOut;
\r
801 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
803 configASSERT( pxQueue );
\r
804 configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
808 taskENTER_CRITICAL();
\r
810 /* Is there room on the queue now? To be running we must be
\r
811 the highest priority task wanting to access the queue. */
\r
812 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
\r
814 traceQUEUE_SEND( pxQueue );
\r
815 prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
\r
817 /* If there was a task waiting for data to arrive on the
\r
818 queue then unblock it now. */
\r
819 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
821 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
\r
823 /* The unblocked task has a priority higher than
\r
824 our own so yield immediately. */
\r
825 portYIELD_WITHIN_API();
\r
829 mtCOVERAGE_TEST_MARKER();
\r
834 mtCOVERAGE_TEST_MARKER();
\r
837 taskEXIT_CRITICAL();
\r
842 if( xTicksToWait == ( TickType_t ) 0 )
\r
844 taskEXIT_CRITICAL();
\r
845 return errQUEUE_FULL;
\r
847 else if( xEntryTimeSet == pdFALSE )
\r
849 vTaskSetTimeOutState( &xTimeOut );
\r
850 xEntryTimeSet = pdTRUE;
\r
854 taskEXIT_CRITICAL();
\r
856 taskENTER_CRITICAL();
\r
858 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
860 if( prvIsQueueFull( pxQueue ) != pdFALSE )
\r
862 traceBLOCKING_ON_QUEUE_SEND( pxQueue );
\r
863 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
\r
864 portYIELD_WITHIN_API();
\r
868 mtCOVERAGE_TEST_MARKER();
\r
873 taskEXIT_CRITICAL();
\r
874 traceQUEUE_SEND_FAILED( pxQueue );
\r
875 return errQUEUE_FULL;
\r
878 taskEXIT_CRITICAL();
\r
882 #endif /* configUSE_ALTERNATIVE_API */
\r
883 /*-----------------------------------------------------------*/
\r
885 #if ( configUSE_ALTERNATIVE_API == 1 )
\r
887 BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
\r
889 BaseType_t xEntryTimeSet = pdFALSE;
\r
890 TimeOut_t xTimeOut;
\r
891 int8_t *pcOriginalReadPosition;
\r
892 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
894 configASSERT( pxQueue );
\r
895 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
899 taskENTER_CRITICAL();
\r
901 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
903 /* Remember our read position in case we are just peeking. */
\r
904 pcOriginalReadPosition = pxQueue->u.pcReadFrom;
\r
906 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
908 if( xJustPeeking == pdFALSE )
\r
910 traceQUEUE_RECEIVE( pxQueue );
\r
912 /* Data is actually being removed (not just peeked). */
\r
913 --( pxQueue->uxMessagesWaiting );
\r
915 #if ( configUSE_MUTEXES == 1 )
\r
917 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
919 /* Record the information required to implement
\r
920 priority inheritance should it become necessary. */
\r
921 pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
\r
925 mtCOVERAGE_TEST_MARKER();
\r
930 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
932 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
\r
934 portYIELD_WITHIN_API();
\r
938 mtCOVERAGE_TEST_MARKER();
\r
944 traceQUEUE_PEEK( pxQueue );
\r
/* We are not removing the data, so reset our read
pointer. */
\r
948 pxQueue->u.pcReadFrom = pcOriginalReadPosition;
\r
950 /* The data is being left in the queue, so see if there are
\r
951 any other tasks waiting for the data. */
\r
952 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
954 /* Tasks that are removed from the event list will get added to
\r
955 the pending ready list as the scheduler is still suspended. */
\r
956 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
958 /* The task waiting has a higher priority than this task. */
\r
959 portYIELD_WITHIN_API();
\r
963 mtCOVERAGE_TEST_MARKER();
\r
968 mtCOVERAGE_TEST_MARKER();
\r
972 taskEXIT_CRITICAL();
\r
977 if( xTicksToWait == ( TickType_t ) 0 )
\r
979 taskEXIT_CRITICAL();
\r
980 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
981 return errQUEUE_EMPTY;
\r
983 else if( xEntryTimeSet == pdFALSE )
\r
985 vTaskSetTimeOutState( &xTimeOut );
\r
986 xEntryTimeSet = pdTRUE;
\r
990 taskEXIT_CRITICAL();
\r
992 taskENTER_CRITICAL();
\r
994 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
996 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
\r
998 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
\r
1000 #if ( configUSE_MUTEXES == 1 )
\r
1002 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
1004 taskENTER_CRITICAL();
\r
1006 vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
\r
1008 taskEXIT_CRITICAL();
\r
1012 mtCOVERAGE_TEST_MARKER();
\r
1017 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
\r
1018 portYIELD_WITHIN_API();
\r
1022 mtCOVERAGE_TEST_MARKER();
\r
1027 taskEXIT_CRITICAL();
\r
1028 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
1029 return errQUEUE_EMPTY;
\r
1032 taskEXIT_CRITICAL();
\r
1037 #endif /* configUSE_ALTERNATIVE_API */
\r
1038 /*-----------------------------------------------------------*/
\r
1040 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
\r
1042 BaseType_t xReturn;
\r
1043 UBaseType_t uxSavedInterruptStatus;
\r
1044 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1046 configASSERT( pxQueue );
\r
1047 configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
1048 configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
\r
1050 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
1051 system call (or maximum API call) interrupt priority. Interrupts that are
\r
1052 above the maximum system call priority are kept permanently enabled, even
\r
1053 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
1054 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
1055 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1056 failure if a FreeRTOS API function is called from an interrupt that has been
\r
1057 assigned a priority above the configured maximum system call priority.
\r
1058 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
1059 that have been assigned a priority at or (logically) below the maximum
\r
1060 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
1061 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
1062 More information (albeit Cortex-M specific) is provided on the following
\r
1063 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1064 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1066 /* Similar to xQueueGenericSend, except without blocking if there is no room
\r
1067 in the queue. Also don't directly wake a task that was blocked on a queue
\r
1068 read, instead return a flag to say whether a context switch is required or
\r
not (i.e. has a task with a higher priority than us been woken by this
post)? */
\r
1071 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1073 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
\r
1075 traceQUEUE_SEND_FROM_ISR( pxQueue );
\r
1077 if( prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ) != pdFALSE )
\r
1079 /* This is a special case that can only be executed if a task
\r
1080 holds multiple mutexes and then gives the mutexes back in an
\r
1081 order that is different to that in which they were taken. */
\r
1082 if( pxHigherPriorityTaskWoken != NULL )
\r
1084 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1088 mtCOVERAGE_TEST_MARKER();
\r
1092 /* The event list is not altered if the queue is locked. This will
\r
1093 be done when the queue is unlocked later. */
\r
1094 if( pxQueue->xTxLock == queueUNLOCKED )
\r
1096 #if ( configUSE_QUEUE_SETS == 1 )
\r
1098 if( pxQueue->pxQueueSetContainer != NULL )
\r
1100 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
\r
1102 /* The queue is a member of a queue set, and posting
\r
1103 to the queue set caused a higher priority task to
\r
1104 unblock. A context switch is required. */
\r
1105 if( pxHigherPriorityTaskWoken != NULL )
\r
1107 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1111 mtCOVERAGE_TEST_MARKER();
\r
1116 mtCOVERAGE_TEST_MARKER();
\r
1121 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1123 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1125 /* The task waiting has a higher priority so
\r
1126 record that a context switch is required. */
\r
1127 if( pxHigherPriorityTaskWoken != NULL )
\r
1129 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1133 mtCOVERAGE_TEST_MARKER();
\r
1138 mtCOVERAGE_TEST_MARKER();
\r
1143 mtCOVERAGE_TEST_MARKER();
\r
1147 #else /* configUSE_QUEUE_SETS */
\r
1149 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1151 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1153 /* The task waiting has a higher priority so record that a
\r
1154 context switch is required. */
\r
1155 if( pxHigherPriorityTaskWoken != NULL )
\r
1157 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1161 mtCOVERAGE_TEST_MARKER();
\r
1166 mtCOVERAGE_TEST_MARKER();
\r
1171 mtCOVERAGE_TEST_MARKER();
\r
1174 #endif /* configUSE_QUEUE_SETS */
\r
1178 /* Increment the lock count so the task that unlocks the queue
\r
1179 knows that data was posted while it was locked. */
\r
1180 ++( pxQueue->xTxLock );
\r
1187 traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
\r
1188 xReturn = errQUEUE_FULL;
\r
1191 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
/*-----------------------------------------------------------*/
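/* Example usage of xQueueGenericSendFromISR() (illustrative sketch, not part
of the kernel): an interrupt service routine posting to a queue through the
xQueueSendFromISR() macro.  The handler and hardware access names below are
hypothetical, and the yield macro used at the end is port specific
(portYIELD_FROM_ISR() on most ports).

    void vHypotheticalRxISR( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    uint8_t ucByte;

        ucByte = ucReadReceivedByte();  // Hypothetical hardware access.

        ( void ) xQueueSendFromISR( xRxQueue, &ucByte, &xHigherPriorityTaskWoken );

        // If posting unblocked a task of higher priority than the task that
        // was interrupted, request a context switch before exiting the ISR.
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/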
\r
1197 BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
\r
1199 BaseType_t xEntryTimeSet = pdFALSE;
\r
1200 TimeOut_t xTimeOut;
\r
1201 int8_t *pcOriginalReadPosition;
\r
1202 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1204 configASSERT( pxQueue );
\r
1205 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
1206 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
\r
1208 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
\r
1212 /* This function relaxes the coding standard somewhat to allow return
\r
1213 statements within the function itself. This is done in the interest
\r
1214 of execution time efficiency. */
\r
1218 taskENTER_CRITICAL();
\r
1220 /* Is there data in the queue now? To be running we must be
\r
1221 the highest priority task wanting to access the queue. */
\r
1222 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
/* Remember the read position in case the queue is only being
peeked. */
\r
1226 pcOriginalReadPosition = pxQueue->u.pcReadFrom;
\r
1228 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1230 if( xJustPeeking == pdFALSE )
\r
1232 traceQUEUE_RECEIVE( pxQueue );
\r
1234 /* Actually removing data, not just peeking. */
\r
1235 --( pxQueue->uxMessagesWaiting );
\r
1237 #if ( configUSE_MUTEXES == 1 )
\r
1239 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
1241 /* Record the information required to implement
\r
1242 priority inheritance should it become necessary. */
\r
1243 pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
\r
1247 mtCOVERAGE_TEST_MARKER();
\r
1252 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
1254 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
\r
1256 queueYIELD_IF_USING_PREEMPTION();
\r
1260 mtCOVERAGE_TEST_MARKER();
\r
1265 mtCOVERAGE_TEST_MARKER();
\r
1270 traceQUEUE_PEEK( pxQueue );
\r
/* The data is not being removed, so reset the read
pointer. */
\r
1274 pxQueue->u.pcReadFrom = pcOriginalReadPosition;
\r
1276 /* The data is being left in the queue, so see if there are
\r
1277 any other tasks waiting for the data. */
\r
1278 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1280 /* Tasks that are removed from the event list will get added to
\r
1281 the pending ready list as the scheduler is still suspended. */
\r
1282 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1284 /* The task waiting has a higher priority than this task. */
\r
1285 queueYIELD_IF_USING_PREEMPTION();
\r
1289 mtCOVERAGE_TEST_MARKER();
\r
1294 mtCOVERAGE_TEST_MARKER();
\r
1298 taskEXIT_CRITICAL();
\r
1303 if( xTicksToWait == ( TickType_t ) 0 )
\r
1305 /* The queue was empty and no block time is specified (or
\r
1306 the block time has expired) so leave now. */
\r
1307 taskEXIT_CRITICAL();
\r
1308 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
1309 return errQUEUE_EMPTY;
\r
1311 else if( xEntryTimeSet == pdFALSE )
\r
1313 /* The queue was empty and a block time was specified so
\r
1314 configure the timeout structure. */
\r
1315 vTaskSetTimeOutState( &xTimeOut );
\r
1316 xEntryTimeSet = pdTRUE;
\r
1320 /* Entry time was already set. */
\r
1321 mtCOVERAGE_TEST_MARKER();
\r
1325 taskEXIT_CRITICAL();
\r
1327 /* Interrupts and other tasks can send to and receive from the queue
\r
1328 now the critical section has been exited. */
\r
1330 vTaskSuspendAll();
\r
1331 prvLockQueue( pxQueue );
\r
1333 /* Update the timeout state to see if it has expired yet. */
\r
1334 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
1336 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
\r
1338 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
\r
1340 #if ( configUSE_MUTEXES == 1 )
\r
1342 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
1344 taskENTER_CRITICAL();
\r
1346 vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
\r
1348 taskEXIT_CRITICAL();
\r
1352 mtCOVERAGE_TEST_MARKER();
\r
1357 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
\r
1358 prvUnlockQueue( pxQueue );
\r
1359 if( xTaskResumeAll() == pdFALSE )
\r
1361 portYIELD_WITHIN_API();
\r
1365 mtCOVERAGE_TEST_MARKER();
\r
1371 prvUnlockQueue( pxQueue );
\r
1372 ( void ) xTaskResumeAll();
\r
1377 prvUnlockQueue( pxQueue );
\r
1378 ( void ) xTaskResumeAll();
\r
1379 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
1380 return errQUEUE_EMPTY;
\r
/*-----------------------------------------------------------*/
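/* Example usage of xQueueGenericReceive() (illustrative sketch, not part of
the kernel): application code normally calls this function through the
xQueueReceive() macro (which removes the item) or the xQueuePeek() macro
(which leaves the item on the queue).  The queue handle below is hypothetical.

    uint32_t ulReceived;

    // Wait up to 200 ticks for data to arrive.
    if( xQueueReceive( xSensorQueue, &ulReceived, ( TickType_t ) 200 ) == pdPASS )
    {
        // ulReceived now holds a copy of the item that was removed from
        // the queue.
    }
*/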
\r
1386 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
\r
1388 BaseType_t xReturn;
\r
1389 UBaseType_t uxSavedInterruptStatus;
\r
1390 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1392 configASSERT( pxQueue );
\r
1393 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
1395 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
1396 system call (or maximum API call) interrupt priority. Interrupts that are
\r
1397 above the maximum system call priority are kept permanently enabled, even
\r
1398 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
1399 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
1400 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1401 failure if a FreeRTOS API function is called from an interrupt that has been
\r
1402 assigned a priority above the configured maximum system call priority.
\r
1403 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
1404 that have been assigned a priority at or (logically) below the maximum
\r
1405 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
1406 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
1407 More information (albeit Cortex-M specific) is provided on the following
\r
1408 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1409 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1411 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1413 /* Cannot block in an ISR, so check there is data available. */
\r
1414 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
1416 traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
\r
1418 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1419 --( pxQueue->uxMessagesWaiting );
\r
1421 /* If the queue is locked the event list will not be modified.
\r
1422 Instead update the lock count so the task that unlocks the queue
\r
will know that an ISR has removed data while the queue was
locked. */
\r
1425 if( pxQueue->xRxLock == queueUNLOCKED )
\r
1427 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
1429 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
\r
1431 /* The task waiting has a higher priority than us so
\r
1432 force a context switch. */
\r
1433 if( pxHigherPriorityTaskWoken != NULL )
\r
1435 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1439 mtCOVERAGE_TEST_MARKER();
\r
1444 mtCOVERAGE_TEST_MARKER();
\r
1449 mtCOVERAGE_TEST_MARKER();
\r
1454 /* Increment the lock count so the task that unlocks the queue
\r
1455 knows that data was removed while it was locked. */
\r
1456 ++( pxQueue->xRxLock );
\r
1464 traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
\r
1467 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
/*-----------------------------------------------------------*/
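/* Example usage of xQueueReceiveFromISR() (illustrative sketch, not part of
the kernel): draining queued data from within an interrupt.  The queue handle
and hardware access function are hypothetical, and the yield macro is port
specific.

    BaseType_t xTaskWoken = pdFALSE;
    uint8_t ucByteToTransmit;

    while( xQueueReceiveFromISR( xTxQueue, &ucByteToTransmit, &xTaskWoken ) == pdPASS )
    {
        vWriteByteToHardware( ucByteToTransmit );  // Hypothetical.
    }

    portYIELD_FROM_ISR( xTaskWoken );
*/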
\r
1473 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
\r
1475 BaseType_t xReturn;
\r
1476 UBaseType_t uxSavedInterruptStatus;
\r
1477 int8_t *pcOriginalReadPosition;
\r
1478 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1480 configASSERT( pxQueue );
\r
1481 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
1483 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
1484 system call (or maximum API call) interrupt priority. Interrupts that are
\r
1485 above the maximum system call priority are kept permanently enabled, even
\r
1486 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
1487 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
1488 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1489 failure if a FreeRTOS API function is called from an interrupt that has been
\r
1490 assigned a priority above the configured maximum system call priority.
\r
1491 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
1492 that have been assigned a priority at or (logically) below the maximum
\r
1493 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
1494 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
1495 More information (albeit Cortex-M specific) is provided on the following
\r
1496 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1497 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1499 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1501 /* Cannot block in an ISR, so check there is data available. */
\r
1502 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
1504 traceQUEUE_PEEK_FROM_ISR( pxQueue );
\r
1506 /* Remember the read position so it can be reset as nothing is
\r
1507 actually being removed from the queue. */
\r
1508 pcOriginalReadPosition = pxQueue->u.pcReadFrom;
\r
1509 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1510 pxQueue->u.pcReadFrom = pcOriginalReadPosition;
\r
1517 traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
\r
1520 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1524 /*-----------------------------------------------------------*/
\r
1526 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
\r
1528 UBaseType_t uxReturn;
\r
1530 configASSERT( xQueue );
\r
1532 taskENTER_CRITICAL();
\r
1534 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
\r
1536 taskEXIT_CRITICAL();
\r
1539 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
\r
1540 /*-----------------------------------------------------------*/
\r
1542 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
\r
UBaseType_t uxReturn;
Queue_t *pxQueue;

    pxQueue = ( Queue_t * ) xQueue;
\r
1548 configASSERT( pxQueue );
\r
1550 taskENTER_CRITICAL();
\r
1552 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
\r
1554 taskEXIT_CRITICAL();
\r
1557 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
\r
/*-----------------------------------------------------------*/
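/* Example usage (illustrative sketch, not part of the kernel): the two query
functions above can be combined to report how full a queue is.  The queue
handle is hypothetical.

    UBaseType_t uxUsed = uxQueueMessagesWaiting( xSensorQueue );
    UBaseType_t uxFree = uxQueueSpacesAvailable( xSensorQueue );

    // For a queue created with length N, uxUsed + uxFree equals N whenever
    // the two values are sampled atomically (for example with the scheduler
    // suspended or from within a critical section).
*/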
\r
1560 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
\r
1562 UBaseType_t uxReturn;
\r
1564 configASSERT( xQueue );
\r
1566 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
\r
1569 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
\r
1570 /*-----------------------------------------------------------*/
\r
1572 void vQueueDelete( QueueHandle_t xQueue )
\r
1574 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1576 configASSERT( pxQueue );
\r
1578 traceQUEUE_DELETE( pxQueue );
\r
1579 #if ( configQUEUE_REGISTRY_SIZE > 0 )
\r
1581 vQueueUnregisterQueue( pxQueue );
\r
1584 if( pxQueue->pcHead != NULL )
\r
1586 vPortFree( pxQueue->pcHead );
\r
1588 vPortFree( pxQueue );
\r
1590 /*-----------------------------------------------------------*/
\r
1592 #if ( configUSE_TRACE_FACILITY == 1 )
\r
1594 UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
\r
1596 return ( ( Queue_t * ) xQueue )->uxQueueNumber;
\r
1599 #endif /* configUSE_TRACE_FACILITY */
\r
1600 /*-----------------------------------------------------------*/
\r
1602 #if ( configUSE_TRACE_FACILITY == 1 )
\r
1604 void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
\r
1606 ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
\r
1609 #endif /* configUSE_TRACE_FACILITY */
\r
1610 /*-----------------------------------------------------------*/
\r
1612 #if ( configUSE_TRACE_FACILITY == 1 )
\r
1614 uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
\r
1616 return ( ( Queue_t * ) xQueue )->ucQueueType;
\r
1619 #endif /* configUSE_TRACE_FACILITY */
\r
1620 /*-----------------------------------------------------------*/
\r
1622 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
\r
1624 BaseType_t xReturn = pdFALSE;
\r
1626 if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
\r
1628 #if ( configUSE_MUTEXES == 1 )
\r
1630 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
1632 /* The mutex is no longer being held. */
\r
1633 xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
\r
1634 pxQueue->pxMutexHolder = NULL;
\r
1638 mtCOVERAGE_TEST_MARKER();
\r
1641 #endif /* configUSE_MUTEXES */
\r
1643 else if( xPosition == queueSEND_TO_BACK )
\r
1645 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
\r
1646 pxQueue->pcWriteTo += pxQueue->uxItemSize;
\r
1647 if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
\r
1649 pxQueue->pcWriteTo = pxQueue->pcHead;
\r
1653 mtCOVERAGE_TEST_MARKER();
\r
1658 ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
1659 pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
\r
1660 if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
\r
1662 pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
\r
1666 mtCOVERAGE_TEST_MARKER();
\r
1669 if( xPosition == queueOVERWRITE )
\r
1671 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
1673 /* An item is not being added but overwritten, so subtract
\r
1674 one from the recorded number of items in the queue so when
\r
one is added again below the number of recorded items remains
correct. */
\r
1677 --( pxQueue->uxMessagesWaiting );
\r
1681 mtCOVERAGE_TEST_MARKER();
\r
1686 mtCOVERAGE_TEST_MARKER();
\r
1690 ++( pxQueue->uxMessagesWaiting );
\r
1694 /*-----------------------------------------------------------*/
\r
1696 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
\r
1698 if( pxQueue->uxItemSize != 0 )
\r
1700 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
\r
1701 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
\r
1703 pxQueue->u.pcReadFrom = pxQueue->pcHead;
\r
1707 mtCOVERAGE_TEST_MARKER();
\r
1709 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
\r
1712 /*-----------------------------------------------------------*/
\r
1714 static void prvUnlockQueue( Queue_t * const pxQueue )
\r
1716 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
\r
/* The lock counts contain the number of extra data items placed or
removed from the queue while the queue was locked.  When a queue is
locked items can be added or removed, but the event lists cannot be
updated. */
\r
1722 taskENTER_CRITICAL();
\r
1724 /* See if data was added to the queue while it was locked. */
\r
1725 while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
\r
1727 /* Data was posted while the queue was locked. Are any tasks
\r
1728 blocked waiting for data to become available? */
\r
1729 #if ( configUSE_QUEUE_SETS == 1 )
\r
1731 if( pxQueue->pxQueueSetContainer != NULL )
\r
1733 if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
\r
1735 /* The queue is a member of a queue set, and posting to
\r
1736 the queue set caused a higher priority task to unblock.
\r
1737 A context switch is required. */
\r
1738 vTaskMissedYield();
\r
1742 mtCOVERAGE_TEST_MARKER();
\r
1747 /* Tasks that are removed from the event list will get added to
\r
1748 the pending ready list as the scheduler is still suspended. */
\r
1749 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1751 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1753 /* The task waiting has a higher priority so record that a
\r
1754 context switch is required. */
\r
1755 vTaskMissedYield();
\r
1759 mtCOVERAGE_TEST_MARKER();
\r
1768 #else /* configUSE_QUEUE_SETS */
\r
1770 /* Tasks that are removed from the event list will get added to
\r
1771 the pending ready list as the scheduler is still suspended. */
\r
1772 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1774 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1776 /* The task waiting has a higher priority so record that a
\r
1777 context switch is required. */
\r
1778 vTaskMissedYield();
\r
1782 mtCOVERAGE_TEST_MARKER();
\r
1790 #endif /* configUSE_QUEUE_SETS */
\r
1792 --( pxQueue->xTxLock );
\r
1795 pxQueue->xTxLock = queueUNLOCKED;
\r
1797 taskEXIT_CRITICAL();
\r
1799 /* Do the same for the Rx lock. */
\r
1800 taskENTER_CRITICAL();
\r
1802 while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
\r
1804 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
1806 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
\r
1808 vTaskMissedYield();
\r
1812 mtCOVERAGE_TEST_MARKER();
\r
1815 --( pxQueue->xRxLock );
\r
1823 pxQueue->xRxLock = queueUNLOCKED;
\r
1825 taskEXIT_CRITICAL();
\r
1827 /*-----------------------------------------------------------*/
\r
1829 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
\r
1831 BaseType_t xReturn;
\r
1833 taskENTER_CRITICAL();
\r
1835 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
\r
1841 xReturn = pdFALSE;
\r
1844 taskEXIT_CRITICAL();
\r
1848 /*-----------------------------------------------------------*/
\r
1850 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
\r
1852 BaseType_t xReturn;
\r
1854 configASSERT( xQueue );
\r
1855 if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
\r
1861 xReturn = pdFALSE;
\r
1865 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
\r
1866 /*-----------------------------------------------------------*/
\r
1868 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
\r
1870 BaseType_t xReturn;
\r
1872 taskENTER_CRITICAL();
\r
1874 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
\r
1880 xReturn = pdFALSE;
\r
1883 taskEXIT_CRITICAL();
\r
1887 /*-----------------------------------------------------------*/
\r
1889 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
\r
1891 BaseType_t xReturn;
\r
1893 configASSERT( xQueue );
\r
1894 if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
\r
1900 xReturn = pdFALSE;
\r
1904 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
\r
1905 /*-----------------------------------------------------------*/
\r
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a co-routine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
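/* Usage sketch (illustrative only): application code does not normally call
xQueueCRSend() directly.  A co-routine posts through the crQUEUE_SEND() macro
from croutine.h, which converts the errQUEUE_BLOCKED return into a co-routine
block.  xCoRoutineQueue below is a hypothetical queue assumed to have been
created elsewhere with xQueueCreate():

	void vPostingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
	{
	static BaseType_t xResult;
	static uint32_t ulValueToPost = 0;

		crSTART( xHandle );

		for( ;; )
		{
			// Block for up to 10 ticks if the queue is full.
			crQUEUE_SEND( xHandle, xCoRoutineQueue, &ulValueToPost, 10, &xResult );
			ulValueToPost++;
		}

		crEND();
	}
*/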
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
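/* Usage sketch (illustrative only): the receiving side uses the
crQUEUE_RECEIVE() macro rather than calling xQueueCRReceive() directly, again
so that errQUEUE_BLOCKED can suspend the co-routine at that point.
xCoRoutineQueue is the same hypothetical queue as in the sending sketch above:

	void vReceivingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
	{
	static BaseType_t xResult;
	static uint32_t ulReceivedValue;

		crSTART( xHandle );

		for( ;; )
		{
			// Wait up to 100 ticks for data to arrive.
			crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &ulReceivedValue, 100, &xResult );

			if( xResult == pdPASS )
			{
				// Process ulReceivedValue here.
			}
		}

		crEND();
	}
*/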
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
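/* Usage sketch (illustrative only): an interrupt service routine would call
this through the crQUEUE_SEND_FROM_ISR() macro, feeding the return value back
in so that only one co-routine is woken per interrupt.  xCommsQueue and the
data value are hypothetical:

	void vExampleRxISR( void )
	{
	BaseType_t xCoRoutineWoken = pdFALSE;
	char cRxedByte = 'a';   // Stand-in for a value read from hardware.

		xCoRoutineWoken = crQUEUE_SEND_FROM_ISR( xCommsQueue, &cRxedByte, xCoRoutineWoken );
	}
*/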
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available.  If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
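/* Usage sketch (illustrative only): the matching macro for this function is
crQUEUE_RECEIVE_FROM_ISR().  An ISR draining a queue that is filled by a
co-routine might look roughly like this (xTxQueue is hypothetical):

	void vExampleTxISR( void )
	{
	BaseType_t xCoRoutineWoken = pdFALSE;
	char cByteToTx;

		while( crQUEUE_RECEIVE_FROM_ISR( xTxQueue, &cByteToTx, &xCoRoutineWoken ) == pdPASS )
		{
			// Write cByteToTx to the hardware here.
		}
	}
*/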
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
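/* Usage sketch (illustrative only): the registry exists purely to make queues
and semaphores visible, by name, to a kernel aware debugger.  A queue would
typically be registered straight after it is created:

	QueueHandle_t xTxQueue;

	xTxQueue = xQueueCreate( 16, sizeof( char ) );
	if( xTxQueue != NULL )
	{
		// Only the pointer is stored, so the string must remain valid for
		// as long as the queue stays registered.
		vQueueAddToRegistry( xTxQueue, "TxQueue" );
	}
*/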
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered is actually in the
		registry.  Remove it if it is. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot is free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
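/* Usage note (illustrative only): if a registered queue is going to be
deleted it should be removed from the registry first, so a debugger is not
left holding a stale handle:

	vQueueUnregisterQueue( xTxQueue );
	vQueueDelete( xTxQueue );
*/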
#if ( configUSE_TIMERS == 1 )

	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/
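/* Usage note (illustrative sketch of the intended caller): the timer service
task in timers.c is the expected user of this function.  It suspends the
scheduler, queues itself to wait for a timer command, then actually blocks
when the scheduler is resumed, approximately as follows:

	vTaskSuspendAll();
	{
		// ... work out xTimeToWait from the next timer expiry time ...
		vQueueWaitForMessageRestricted( xTimerQueue, xTimeToWait );
	}
	if( xTaskResumeAll() == pdFALSE )
	{
		portYIELD_WITHIN_API();
	}
*/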
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xReturn = pdFAIL;
			}
			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xReturn = pdFAIL;
			}
			else
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
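/* Usage sketch (illustrative only): a queue set must be dimensioned for the
worst case - one event for every space in every member queue plus one for each
member semaphore - and members must be empty when they are added:

	#define setQUEUE_LENGTH			( 5 )
	#define setBINARY_SEMAPHORES	( 1 )

	QueueSetHandle_t xQueueSet;
	QueueHandle_t xQueue;
	SemaphoreHandle_t xSemaphore;

	xQueueSet = xQueueCreateSet( setQUEUE_LENGTH + setBINARY_SEMAPHORES );
	xQueue = xQueueCreate( setQUEUE_LENGTH, sizeof( uint32_t ) );
	xSemaphore = xSemaphoreCreateBinary();

	// Both members are empty at this point so both calls should return pdPASS.
	configASSERT( xQueueAddToSet( xQueue, xQueueSet ) == pdPASS );
	configASSERT( xQueueAddToSet( xSemaphore, xQueueSet ) == pdPASS );
*/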
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
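/* Usage sketch (illustrative only, using the hypothetical xQueueSet, xQueue
and xSemaphore members from the sketch above): a task blocks on the set and
then reads from whichever member is returned.  The returned member is known to
contain data, so a zero block time can be used on the second call:

	for( ;; )
	{
		QueueSetMemberHandle_t xActivatedMember;
		uint32_t ulReceived;

		xActivatedMember = xQueueSelectFromSet( xQueueSet, portMAX_DELAY );

		if( xActivatedMember == xQueue )
		{
			( void ) xQueueReceive( xQueue, &ulReceived, 0 );
			// Process ulReceived here.
		}
		else if( xActivatedMember == xSemaphore )
		{
			( void ) xSemaphoreTake( xSemaphore, 0 );
			// Handle the semaphore event here.
		}
	}
*/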
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called from a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );

			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
				{
					/* The task waiting has a higher priority. */
					xReturn = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */