2 FreeRTOS V8.0.1 - Copyright (C) 2014 Real Time Engineers Ltd.
\r
5 VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
\r
7 ***************************************************************************
\r
9 * FreeRTOS provides completely free yet professionally developed, *
\r
10 * robust, strictly quality controlled, supported, and cross *
\r
11 * platform software that has become a de facto standard. *
\r
13 * Help yourself get started quickly and support the FreeRTOS *
\r
14 * project by purchasing a FreeRTOS tutorial book, reference *
\r
15 * manual, or both from: http://www.FreeRTOS.org/Documentation *
\r
19 ***************************************************************************
\r
21 This file is part of the FreeRTOS distribution.
\r
23 FreeRTOS is free software; you can redistribute it and/or modify it under
\r
24 the terms of the GNU General Public License (version 2) as published by the
\r
25 Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
\r
27 >>! NOTE: The modification to the GPL is included to allow you to !<<
\r
28 >>! distribute a combined work that includes FreeRTOS without being !<<
\r
29 >>! obliged to provide the source code for proprietary components !<<
\r
30 >>! outside of the FreeRTOS kernel. !<<
\r
32 FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
\r
33 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
\r
34 FOR A PARTICULAR PURPOSE. Full license text is available from the following
\r
35 link: http://www.freertos.org/a00114.html
\r
39 ***************************************************************************
\r
41 * Having a problem? Start by reading the FAQ "My application does *
\r
42 * not run, what could be wrong?" *
\r
44 * http://www.FreeRTOS.org/FAQHelp.html *
\r
46 ***************************************************************************
\r
48 http://www.FreeRTOS.org - Documentation, books, training, latest versions,
\r
49 license and Real Time Engineers Ltd. contact details.
\r
51 http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
\r
52 including FreeRTOS+Trace - an indispensable productivity tool, a DOS
\r
53 compatible FAT file system, and our tiny thread aware UDP/IP stack.
\r
55 http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High
\r
56 Integrity Systems to sell under the OpenRTOS brand. Low cost OpenRTOS
\r
57 licenses offer ticketed support, indemnification and middleware.
\r
59 http://www.SafeRTOS.com - High Integrity Systems also provide a safety
\r
60 engineered and independently SIL3 certified version for use in safety and
\r
61 mission critical applications that require provable dependability.
\r
/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
	#include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
\r
/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED					( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
\r
122 * Definition of the queue used by the scheduler.
\r
123 * Items are queued by copy, not reference.
\r
125 typedef struct QueueDefinition
\r
127 int8_t *pcHead; /*< Points to the beginning of the queue storage area. */
\r
128 int8_t *pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
\r
129 int8_t *pcWriteTo; /*< Points to the free next place in the storage area. */
\r
131 union /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
\r
133 int8_t *pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
\r
134 UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
\r
137 List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
\r
138 List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
\r
140 volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
\r
141 UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
\r
142 UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */
\r
144 volatile BaseType_t xRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
\r
145 volatile BaseType_t xTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
\r
147 #if ( configUSE_TRACE_FACILITY == 1 )
\r
148 UBaseType_t uxQueueNumber;
\r
149 uint8_t ucQueueType;
\r
152 #if ( configUSE_QUEUE_SETS == 1 )
\r
153 struct QueueDefinition *pxQueueSetContainer;
\r
158 /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
\r
159 name below to enable the use of older kernel aware debuggers. */
\r
160 typedef xQUEUE Queue_t;
\r
162 /*-----------------------------------------------------------*/
\r
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} xQueueRegistryItem;

	/* The old xQueueRegistryItem name is maintained above then typedefed to the
	new xQueueRegistryItem name below to enable the use of older kernel aware
	debuggers. */
	typedef xQueueRegistryItem QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
\r
192 * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not
\r
193 * prevent an ISR from adding or removing items to the queue, but does prevent
\r
194 * an ISR from removing tasks from the queue event lists. If an ISR finds a
\r
195 * queue is locked it will instead increment the appropriate queue lock count
\r
196 * to indicate that a task may require unblocking. When the queue in unlocked
\r
197 * these lock counts are inspected, and the appropriate action taken.
\r
199 static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
\r
202 * Uses a critical section to determine if there is any data in a queue.
\r
204 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
\r
206 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
\r
209 * Uses a critical section to determine if there is any space in a queue.
\r
211 * @return pdTRUE if there is no space, otherwise pdFALSE;
\r
213 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
\r
216 * Copies an item into the queue, either at the front of the queue or the
\r
217 * back of the queue.
\r
219 static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
\r
222 * Copies an item out of a queue.
\r
224 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;
\r
226 #if ( configUSE_QUEUE_SETS == 1 )
\r
228 * Checks to see if a queue is a member of a queue set, and if so, notifies
\r
229 * the queue set that the queue contains data.
\r
231 static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
\r
234 /*-----------------------------------------------------------*/
\r
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL();									\
	{														\
		if( ( pxQueue )->xRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->xTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
\r
255 BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
\r
257 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
259 configASSERT( pxQueue );
\r
261 taskENTER_CRITICAL();
\r
263 pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
\r
264 pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
\r
265 pxQueue->pcWriteTo = pxQueue->pcHead;
\r
266 pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
\r
267 pxQueue->xRxLock = queueUNLOCKED;
\r
268 pxQueue->xTxLock = queueUNLOCKED;
\r
270 if( xNewQueue == pdFALSE )
\r
272 /* If there are tasks blocked waiting to read from the queue, then
\r
273 the tasks will remain blocked as after this function exits the queue
\r
274 will still be empty. If there are tasks blocked waiting to write to
\r
275 the queue, then one should be unblocked as after this function exits
\r
276 it will be possible to write to it. */
\r
277 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
279 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
\r
281 queueYIELD_IF_USING_PREEMPTION();
\r
285 mtCOVERAGE_TEST_MARKER();
\r
290 mtCOVERAGE_TEST_MARKER();
\r
295 /* Ensure the event queues start in the correct state. */
\r
296 vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
\r
297 vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
\r
300 taskEXIT_CRITICAL();
\r
302 /* A value is returned for calling semantic consistency with previous
\r
306 /*-----------------------------------------------------------*/
\r
308 QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
\r
310 Queue_t *pxNewQueue;
\r
311 size_t xQueueSizeInBytes;
\r
312 QueueHandle_t xReturn = NULL;
\r
314 /* Remove compiler warnings about unused parameters should
\r
315 configUSE_TRACE_FACILITY not be set to 1. */
\r
316 ( void ) ucQueueType;
\r
318 /* Allocate the new queue structure. */
\r
319 if( uxQueueLength > ( UBaseType_t ) 0 )
\r
321 pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
\r
322 if( pxNewQueue != NULL )
\r
324 /* Create the list of pointers to queue items. The queue is one byte
\r
325 longer than asked for to make wrap checking easier/faster. */
\r
326 xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
328 pxNewQueue->pcHead = ( int8_t * ) pvPortMalloc( xQueueSizeInBytes );
\r
329 if( pxNewQueue->pcHead != NULL )
\r
331 /* Initialise the queue members as described above where the
\r
332 queue type is defined. */
\r
333 pxNewQueue->uxLength = uxQueueLength;
\r
334 pxNewQueue->uxItemSize = uxItemSize;
\r
335 ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
\r
337 #if ( configUSE_TRACE_FACILITY == 1 )
\r
339 pxNewQueue->ucQueueType = ucQueueType;
\r
341 #endif /* configUSE_TRACE_FACILITY */
\r
343 #if( configUSE_QUEUE_SETS == 1 )
\r
345 pxNewQueue->pxQueueSetContainer = NULL;
\r
347 #endif /* configUSE_QUEUE_SETS */
\r
349 traceQUEUE_CREATE( pxNewQueue );
\r
350 xReturn = pxNewQueue;
\r
354 traceQUEUE_CREATE_FAILED( ucQueueType );
\r
355 vPortFree( pxNewQueue );
\r
360 mtCOVERAGE_TEST_MARKER();
\r
365 mtCOVERAGE_TEST_MARKER();
\r
368 configASSERT( xReturn );
\r
372 /*-----------------------------------------------------------*/
\r
#if ( configUSE_MUTEXES == 1 )

	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		/* Allocate the new queue structure. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
		if( pxNewQueue != NULL )
		{
			/* Information required for priority inheritance. */
			pxNewQueue->pxMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* Queues used as a mutex no data is actually copied into or out
			of the queue. */
			pxNewQueue->pcWriteTo = NULL;
			pxNewQueue->u.pcReadFrom = NULL;

			/* Each mutex has a length of 1 (like a binary semaphore) and
			an item size of 0 as nothing is actually copied into or out
			of the mutex. */
			pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
			pxNewQueue->uxLength = ( UBaseType_t ) 1U;
			pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
			pxNewQueue->xRxLock = queueUNLOCKED;
			pxNewQueue->xTxLock = queueUNLOCKED;

			#if ( configUSE_TRACE_FACILITY == 1 )
			{
				pxNewQueue->ucQueueType = ucQueueType;
			}
			#endif

			#if ( configUSE_QUEUE_SETS == 1 )
			{
				pxNewQueue->pxQueueSetContainer = NULL;
			}
			#endif

			/* Ensure the event queues start with the correct state. */
			vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state.  A mutex is
			created 'available', so give it once before returning it. */
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			traceCREATE_MUTEX_FAILED();
		}

		configASSERT( pxNewQueue );
		return pxNewQueue;
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
\r
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
	{
	void *pxReturn;

		/* This function is called by xSemaphoreGetMutexHolder(), and should not
		be called directly.  Note:  This is a good way of determining if the
		calling task is the mutex holder, but not a good way of determining the
		identity of the mutex holder, as the holder may change between the
		following critical section exiting and the function returning. */
		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
			}
			else
			{
				/* Not a mutex, so there is no holder to report. */
				pxReturn = NULL;
			}
		}
		taskEXIT_CRITICAL();

		return pxReturn;
	} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
\r
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then pxMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		pxMutexHolder can never coincidentally equal the tasks handle, and as
		this is the only condition we are interested in it does not matter if
		pxMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the pxMutexHolder variable. */
		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.uxRecursiveCallCount )--;

			/* Have we unwound the call count? */
			if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			/* We cannot give the mutex because we are not the holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* Comments regarding mutual exclusion as per those within
		xQueueGiveMutexRecursive(). */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
		{
			/* The calling task already holds the mutex - just bump the
			recursion count, no blocking can occur. */
			( pxMutex->u.uxRecursiveCallCount )++;
			xReturn = pdPASS;
		}
		else
		{
			xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

			/* pdPASS will only be returned if we successfully obtained the mutex,
			we may have blocked to reach here. */
			if( xReturn == pdPASS )
			{
				( pxMutex->u.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_COUNTING_SEMAPHORES == 1 )

	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		/* A counting semaphore is just a queue of zero-size items; the count
		is held in uxMessagesWaiting. */
		xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		configASSERT( xHandle );
		return xHandle;
	}

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/
\r
593 BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
\r
595 BaseType_t xEntryTimeSet = pdFALSE;
\r
596 TimeOut_t xTimeOut;
\r
597 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
599 configASSERT( pxQueue );
\r
600 configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
601 configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
\r
602 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
\r
604 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
\r
609 /* This function relaxes the coding standard somewhat to allow return
\r
610 statements within the function itself. This is done in the interest
\r
611 of execution time efficiency. */
\r
614 taskENTER_CRITICAL();
\r
616 /* Is there room on the queue now? The running task must be
\r
617 the highest priority task wanting to access the queue. If
\r
618 the head item in the queue is to be overwritten then it does
\r
619 not matter if the queue is full. */
\r
620 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
\r
622 traceQUEUE_SEND( pxQueue );
\r
623 prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
\r
625 #if ( configUSE_QUEUE_SETS == 1 )
\r
627 if( pxQueue->pxQueueSetContainer != NULL )
\r
629 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
\r
631 /* The queue is a member of a queue set, and posting
\r
632 to the queue set caused a higher priority task to
\r
633 unblock. A context switch is required. */
\r
634 queueYIELD_IF_USING_PREEMPTION();
\r
638 mtCOVERAGE_TEST_MARKER();
\r
643 /* If there was a task waiting for data to arrive on the
\r
644 queue then unblock it now. */
\r
645 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
647 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
\r
649 /* The unblocked task has a priority higher than
\r
650 our own so yield immediately. Yes it is ok to
\r
651 do this from within the critical section - the
\r
652 kernel takes care of that. */
\r
653 queueYIELD_IF_USING_PREEMPTION();
\r
657 mtCOVERAGE_TEST_MARKER();
\r
662 mtCOVERAGE_TEST_MARKER();
\r
666 #else /* configUSE_QUEUE_SETS */
\r
668 /* If there was a task waiting for data to arrive on the
\r
669 queue then unblock it now. */
\r
670 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
672 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
\r
674 /* The unblocked task has a priority higher than
\r
675 our own so yield immediately. Yes it is ok to do
\r
676 this from within the critical section - the kernel
\r
677 takes care of that. */
\r
678 queueYIELD_IF_USING_PREEMPTION();
\r
682 mtCOVERAGE_TEST_MARKER();
\r
687 mtCOVERAGE_TEST_MARKER();
\r
690 #endif /* configUSE_QUEUE_SETS */
\r
692 taskEXIT_CRITICAL();
\r
694 /* Return to the original privilege level before exiting the
\r
700 if( xTicksToWait == ( TickType_t ) 0 )
\r
702 /* The queue was full and no block time is specified (or
\r
703 the block time has expired) so leave now. */
\r
704 taskEXIT_CRITICAL();
\r
706 /* Return to the original privilege level before exiting
\r
708 traceQUEUE_SEND_FAILED( pxQueue );
\r
709 return errQUEUE_FULL;
\r
711 else if( xEntryTimeSet == pdFALSE )
\r
713 /* The queue was full and a block time was specified so
\r
714 configure the timeout structure. */
\r
715 vTaskSetTimeOutState( &xTimeOut );
\r
716 xEntryTimeSet = pdTRUE;
\r
720 /* Entry time was already set. */
\r
721 mtCOVERAGE_TEST_MARKER();
\r
725 taskEXIT_CRITICAL();
\r
727 /* Interrupts and other tasks can send to and receive from the queue
\r
728 now the critical section has been exited. */
\r
731 prvLockQueue( pxQueue );
\r
733 /* Update the timeout state to see if it has expired yet. */
\r
734 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
736 if( prvIsQueueFull( pxQueue ) != pdFALSE )
\r
738 traceBLOCKING_ON_QUEUE_SEND( pxQueue );
\r
739 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
\r
741 /* Unlocking the queue means queue events can effect the
\r
742 event list. It is possible that interrupts occurring now
\r
743 remove this task from the event list again - but as the
\r
744 scheduler is suspended the task will go onto the pending
\r
745 ready last instead of the actual ready list. */
\r
746 prvUnlockQueue( pxQueue );
\r
748 /* Resuming the scheduler will move tasks from the pending
\r
749 ready list into the ready list - so it is feasible that this
\r
750 task is already in a ready list before it yields - in which
\r
751 case the yield will not cause a context switch unless there
\r
752 is also a higher priority task in the pending ready list. */
\r
753 if( xTaskResumeAll() == pdFALSE )
\r
755 portYIELD_WITHIN_API();
\r
761 prvUnlockQueue( pxQueue );
\r
762 ( void ) xTaskResumeAll();
\r
767 /* The timeout has expired. */
\r
768 prvUnlockQueue( pxQueue );
\r
769 ( void ) xTaskResumeAll();
\r
771 /* Return to the original privilege level before exiting the
\r
773 traceQUEUE_SEND_FAILED( pxQueue );
\r
774 return errQUEUE_FULL;
\r
778 /*-----------------------------------------------------------*/
\r
#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				/* Is there room on the queue now?  To be running we must be
				the highest priority task wanting to access the queue. */
				if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
				{
					traceQUEUE_SEND( pxQueue );
					prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately. */
							portYIELD_WITHIN_API();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						/* The queue is full and no block time was given -
						fail immediately. */
						taskEXIT_CRITICAL();
						return errQUEUE_FULL;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueFull( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_SEND( pxQueue );
						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* The timeout has expired. */
					taskEXIT_CRITICAL();
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
			}
			taskEXIT_CRITICAL();
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	int8_t *pcOriginalReadPosition;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
				{
					/* Remember our read position in case we are just peeking. */
					pcOriginalReadPosition = pxQueue->u.pcReadFrom;

					prvCopyDataFromQueue( pxQueue, pvBuffer );

					if( xJustPeeking == pdFALSE )
					{
						traceQUEUE_RECEIVE( pxQueue );

						/* Data is actually being removed (not just peeked). */
						--( pxQueue->uxMessagesWaiting );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Record the information required to implement
								priority inheritance should it become necessary. */
								pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
							{
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
					}
					else
					{
						traceQUEUE_PEEK( pxQueue );

						/* We are not removing the data, so reset our read
						pointer. */
						pxQueue->u.pcReadFrom = pcOriginalReadPosition;

						/* The data is being left in the queue, so see if there are
						any other tasks waiting for the data. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							/* Tasks that are removed from the event list will get added to
							the pending ready list as the scheduler is still suspended. */
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority than this task. */
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						/* The queue is empty and no block time was given -
						fail immediately. */
						taskEXIT_CRITICAL();
						traceQUEUE_RECEIVE_FAILED( pxQueue );
						return errQUEUE_EMPTY;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Blocking on a mutex - donate this task's
								priority to the holder if necessary. */
								taskENTER_CRITICAL();
								{
									vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
								}
								taskEXIT_CRITICAL();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* The timeout has expired. */
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
			}
			taskEXIT_CRITICAL();
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/
\r
1025 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
\r
1027 BaseType_t xReturn;
\r
1028 UBaseType_t uxSavedInterruptStatus;
\r
1029 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1031 configASSERT( pxQueue );
\r
1032 configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
1033 configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
\r
1035 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
1036 system call (or maximum API call) interrupt priority. Interrupts that are
\r
1037 above the maximum system call priority are kept permanently enabled, even
\r
1038 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
1039 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
1040 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1041 failure if a FreeRTOS API function is called from an interrupt that has been
\r
1042 assigned a priority above the configured maximum system call priority.
\r
1043 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
1044 that have been assigned a priority at or (logically) below the maximum
\r
1045 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
1046 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
1047 More information (albeit Cortex-M specific) is provided on the following
\r
1048 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1049 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1051 /* Similar to xQueueGenericSend, except without blocking if there is no room
\r
1052 in the queue. Also don't directly wake a task that was blocked on a queue
\r
1053 read, instead return a flag to say whether a context switch is required or
\r
1054 not (i.e. has a task with a higher priority than us been woken by this
\r
1056 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1058 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
\r
1060 traceQUEUE_SEND_FROM_ISR( pxQueue );
\r
1062 prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
\r
1064 /* The event list is not altered if the queue is locked. This will
\r
1065 be done when the queue is unlocked later. */
\r
1066 if( pxQueue->xTxLock == queueUNLOCKED )
\r
1068 #if ( configUSE_QUEUE_SETS == 1 )
\r
1070 if( pxQueue->pxQueueSetContainer != NULL )
\r
1072 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
\r
1074 /* The queue is a member of a queue set, and posting
\r
1075 to the queue set caused a higher priority task to
\r
1076 unblock. A context switch is required. */
\r
1077 if( pxHigherPriorityTaskWoken != NULL )
\r
1079 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1083 mtCOVERAGE_TEST_MARKER();
\r
1088 mtCOVERAGE_TEST_MARKER();
\r
1093 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1095 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1097 /* The task waiting has a higher priority so record that a
\r
1098 context switch is required. */
\r
1099 if( pxHigherPriorityTaskWoken != NULL )
\r
1101 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1105 mtCOVERAGE_TEST_MARKER();
\r
1110 mtCOVERAGE_TEST_MARKER();
\r
1115 mtCOVERAGE_TEST_MARKER();
\r
1119 #else /* configUSE_QUEUE_SETS */
\r
1121 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1123 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1125 /* The task waiting has a higher priority so record that a
\r
1126 context switch is required. */
\r
1127 if( pxHigherPriorityTaskWoken != NULL )
\r
1129 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1133 mtCOVERAGE_TEST_MARKER();
\r
1138 mtCOVERAGE_TEST_MARKER();
\r
1143 mtCOVERAGE_TEST_MARKER();
\r
1146 #endif /* configUSE_QUEUE_SETS */
\r
1150 /* Increment the lock count so the task that unlocks the queue
\r
1151 knows that data was posted while it was locked. */
\r
1152 ++( pxQueue->xTxLock );
\r
1159 traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
\r
1160 xReturn = errQUEUE_FULL;
\r
1163 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1167 /*-----------------------------------------------------------*/
\r
1169 BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
\r
1171 BaseType_t xEntryTimeSet = pdFALSE;
\r
1172 TimeOut_t xTimeOut;
\r
1173 int8_t *pcOriginalReadPosition;
\r
1174 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1176 configASSERT( pxQueue );
\r
1177 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
1178 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
\r
1180 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
\r
1184 /* This function relaxes the coding standard somewhat to allow return
\r
1185 statements within the function itself. This is done in the interest
\r
1186 of execution time efficiency. */
\r
1190 taskENTER_CRITICAL();
\r
1192 /* Is there data in the queue now? To be running we must be
\r
1193 the highest priority task wanting to access the queue. */
\r
1194 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
1196 /* Remember the read position in case the queue is only being
\r
1198 pcOriginalReadPosition = pxQueue->u.pcReadFrom;
\r
1200 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1202 if( xJustPeeking == pdFALSE )
\r
1204 traceQUEUE_RECEIVE( pxQueue );
\r
1206 /* Actually removing data, not just peeking. */
\r
1207 --( pxQueue->uxMessagesWaiting );
\r
1209 #if ( configUSE_MUTEXES == 1 )
\r
1211 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
1213 /* Record the information required to implement
\r
1214 priority inheritance should it become necessary. */
\r
1215 pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
\r
1219 mtCOVERAGE_TEST_MARKER();
\r
1224 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
1226 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
\r
1228 queueYIELD_IF_USING_PREEMPTION();
\r
1232 mtCOVERAGE_TEST_MARKER();
\r
1237 mtCOVERAGE_TEST_MARKER();
\r
1242 traceQUEUE_PEEK( pxQueue );
\r
1244 /* The data is not being removed, so reset the read
\r
1246 pxQueue->u.pcReadFrom = pcOriginalReadPosition;
\r
1248 /* The data is being left in the queue, so see if there are
\r
1249 any other tasks waiting for the data. */
\r
1250 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1252 /* Tasks that are removed from the event list will get added to
\r
1253 the pending ready list as the scheduler is still suspended. */
\r
1254 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1256 /* The task waiting has a higher priority than this task. */
\r
1257 queueYIELD_IF_USING_PREEMPTION();
\r
1261 mtCOVERAGE_TEST_MARKER();
\r
1266 mtCOVERAGE_TEST_MARKER();
\r
1270 taskEXIT_CRITICAL();
\r
1275 if( xTicksToWait == ( TickType_t ) 0 )
\r
1277 /* The queue was empty and no block time is specified (or
\r
1278 the block time has expired) so leave now. */
\r
1279 taskEXIT_CRITICAL();
\r
1280 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
1281 return errQUEUE_EMPTY;
\r
1283 else if( xEntryTimeSet == pdFALSE )
\r
1285 /* The queue was empty and a block time was specified so
\r
1286 configure the timeout structure. */
\r
1287 vTaskSetTimeOutState( &xTimeOut );
\r
1288 xEntryTimeSet = pdTRUE;
\r
1292 /* Entry time was already set. */
\r
1293 mtCOVERAGE_TEST_MARKER();
\r
1297 taskEXIT_CRITICAL();
\r
1299 /* Interrupts and other tasks can send to and receive from the queue
\r
1300 now the critical section has been exited. */
\r
1302 vTaskSuspendAll();
\r
1303 prvLockQueue( pxQueue );
\r
1305 /* Update the timeout state to see if it has expired yet. */
\r
1306 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
1308 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
\r
1310 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
\r
1312 #if ( configUSE_MUTEXES == 1 )
\r
1314 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
1316 taskENTER_CRITICAL();
\r
1318 vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
\r
1320 taskEXIT_CRITICAL();
\r
1324 mtCOVERAGE_TEST_MARKER();
\r
1329 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
\r
1330 prvUnlockQueue( pxQueue );
\r
1331 if( xTaskResumeAll() == pdFALSE )
\r
1333 portYIELD_WITHIN_API();
\r
1337 mtCOVERAGE_TEST_MARKER();
\r
1343 prvUnlockQueue( pxQueue );
\r
1344 ( void ) xTaskResumeAll();
\r
1349 prvUnlockQueue( pxQueue );
\r
1350 ( void ) xTaskResumeAll();
\r
1351 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
1352 return errQUEUE_EMPTY;
\r
1356 /*-----------------------------------------------------------*/
\r
1358 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
\r
1360 BaseType_t xReturn;
\r
1361 UBaseType_t uxSavedInterruptStatus;
\r
1362 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1364 configASSERT( pxQueue );
\r
1365 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
1367 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
1368 system call (or maximum API call) interrupt priority. Interrupts that are
\r
1369 above the maximum system call priority are kept permanently enabled, even
\r
1370 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
1371 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
1372 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1373 failure if a FreeRTOS API function is called from an interrupt that has been
\r
1374 assigned a priority above the configured maximum system call priority.
\r
1375 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
1376 that have been assigned a priority at or (logically) below the maximum
\r
1377 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
1378 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
1379 More information (albeit Cortex-M specific) is provided on the following
\r
1380 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1381 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1383 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1385 /* Cannot block in an ISR, so check there is data available. */
\r
1386 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
1388 traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
\r
1390 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1391 --( pxQueue->uxMessagesWaiting );
\r
1393 /* If the queue is locked the event list will not be modified.
\r
1394 Instead update the lock count so the task that unlocks the queue
\r
1395 will know that an ISR has removed data while the queue was
\r
1397 if( pxQueue->xRxLock == queueUNLOCKED )
\r
1399 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
1401 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
\r
1403 /* The task waiting has a higher priority than us so
\r
1404 force a context switch. */
\r
1405 if( pxHigherPriorityTaskWoken != NULL )
\r
1407 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1411 mtCOVERAGE_TEST_MARKER();
\r
1416 mtCOVERAGE_TEST_MARKER();
\r
1421 mtCOVERAGE_TEST_MARKER();
\r
1426 /* Increment the lock count so the task that unlocks the queue
\r
1427 knows that data was removed while it was locked. */
\r
1428 ++( pxQueue->xRxLock );
\r
1436 traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
\r
1439 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1443 /*-----------------------------------------------------------*/
\r
1445 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
\r
1447 BaseType_t xReturn;
\r
1448 UBaseType_t uxSavedInterruptStatus;
\r
1449 int8_t *pcOriginalReadPosition;
\r
1450 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1452 configASSERT( pxQueue );
\r
1453 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
1455 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
1456 system call (or maximum API call) interrupt priority. Interrupts that are
\r
1457 above the maximum system call priority are kept permanently enabled, even
\r
1458 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
1459 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
1460 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1461 failure if a FreeRTOS API function is called from an interrupt that has been
\r
1462 assigned a priority above the configured maximum system call priority.
\r
1463 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
1464 that have been assigned a priority at or (logically) below the maximum
\r
1465 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
1466 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
1467 More information (albeit Cortex-M specific) is provided on the following
\r
1468 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1469 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1471 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1473 /* Cannot block in an ISR, so check there is data available. */
\r
1474 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
1476 traceQUEUE_PEEK_FROM_ISR( pxQueue );
\r
1478 /* Remember the read position so it can be reset as nothing is
\r
1479 actually being removed from the queue. */
\r
1480 pcOriginalReadPosition = pxQueue->u.pcReadFrom;
\r
1481 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1482 pxQueue->u.pcReadFrom = pcOriginalReadPosition;
\r
1489 traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
\r
1492 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1496 /*-----------------------------------------------------------*/
\r
1498 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
\r
1500 UBaseType_t uxReturn;
\r
1502 configASSERT( xQueue );
\r
1504 taskENTER_CRITICAL();
\r
1506 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
\r
1508 taskEXIT_CRITICAL();
\r
1511 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
\r
1512 /*-----------------------------------------------------------*/
\r
1514 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
\r
1516 UBaseType_t uxReturn;
\r
1519 pxQueue = ( Queue_t * ) xQueue;
\r
1520 configASSERT( pxQueue );
\r
1522 taskENTER_CRITICAL();
\r
1524 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
\r
1526 taskEXIT_CRITICAL();
\r
1529 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
\r
1530 /*-----------------------------------------------------------*/
\r
1532 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
\r
1534 UBaseType_t uxReturn;
\r
1536 configASSERT( xQueue );
\r
1538 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
\r
1541 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
\r
1542 /*-----------------------------------------------------------*/
\r
1544 void vQueueDelete( QueueHandle_t xQueue )
\r
1546 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1548 configASSERT( pxQueue );
\r
1550 traceQUEUE_DELETE( pxQueue );
\r
1551 #if ( configQUEUE_REGISTRY_SIZE > 0 )
\r
1553 vQueueUnregisterQueue( pxQueue );
\r
1556 if( pxQueue->pcHead != NULL )
\r
1558 vPortFree( pxQueue->pcHead );
\r
1560 vPortFree( pxQueue );
\r
1562 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Trace-facility accessor: return the queue number assigned for tracing. */
	UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
\r
1572 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Trace-facility accessor: assign a queue number used by trace tools. */
	void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
	{
		( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
\r
1582 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Trace-facility accessor: return the type of the queue (queue, mutex,
	semaphore, etc.) as recorded at creation time. */
	uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->ucQueueType;
	}

#endif /* configUSE_TRACE_FACILITY */
\r
1592 /*-----------------------------------------------------------*/
\r
1594 static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
\r
1596 if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
\r
1598 #if ( configUSE_MUTEXES == 1 )
\r
1600 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
1602 /* The mutex is no longer being held. */
\r
1603 vTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
\r
1604 pxQueue->pxMutexHolder = NULL;
\r
1608 mtCOVERAGE_TEST_MARKER();
\r
1611 #endif /* configUSE_MUTEXES */
\r
1613 else if( xPosition == queueSEND_TO_BACK )
\r
1615 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
\r
1616 pxQueue->pcWriteTo += pxQueue->uxItemSize;
\r
1617 if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
\r
1619 pxQueue->pcWriteTo = pxQueue->pcHead;
\r
1623 mtCOVERAGE_TEST_MARKER();
\r
1628 ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
1629 pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
\r
1630 if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
\r
1632 pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
\r
1636 mtCOVERAGE_TEST_MARKER();
\r
1639 if( xPosition == queueOVERWRITE )
\r
1641 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
1643 /* An item is not being added but overwritten, so subtract
\r
1644 one from the recorded number of items in the queue so when
\r
1645 one is added again below the number of recorded items remains
\r
1647 --( pxQueue->uxMessagesWaiting );
\r
1651 mtCOVERAGE_TEST_MARKER();
\r
1656 mtCOVERAGE_TEST_MARKER();
\r
1660 ++( pxQueue->uxMessagesWaiting );
\r
1662 /*-----------------------------------------------------------*/
\r
1664 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
\r
1666 if( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX )
\r
1668 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
\r
1669 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
\r
1671 pxQueue->u.pcReadFrom = pxQueue->pcHead;
\r
1675 mtCOVERAGE_TEST_MARKER();
\r
1677 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
\r
1681 mtCOVERAGE_TEST_MARKER();
\r
1684 /*-----------------------------------------------------------*/
\r
1686 static void prvUnlockQueue( Queue_t * const pxQueue )
\r
1688 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
\r
1690 /* The lock counts contains the number of extra data items placed or
\r
1691 removed from the queue while the queue was locked. When a queue is
\r
1692 locked items can be added or removed, but the event lists cannot be
\r
1694 taskENTER_CRITICAL();
\r
1696 /* See if data was added to the queue while it was locked. */
\r
1697 while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
\r
1699 /* Data was posted while the queue was locked. Are any tasks
\r
1700 blocked waiting for data to become available? */
\r
1701 #if ( configUSE_QUEUE_SETS == 1 )
\r
1703 if( pxQueue->pxQueueSetContainer != NULL )
\r
1705 if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
\r
1707 /* The queue is a member of a queue set, and posting to
\r
1708 the queue set caused a higher priority task to unblock.
\r
1709 A context switch is required. */
\r
1710 vTaskMissedYield();
\r
1714 mtCOVERAGE_TEST_MARKER();
\r
1719 /* Tasks that are removed from the event list will get added to
\r
1720 the pending ready list as the scheduler is still suspended. */
\r
1721 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1723 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1725 /* The task waiting has a higher priority so record that a
\r
1726 context switch is required. */
\r
1727 vTaskMissedYield();
\r
1731 mtCOVERAGE_TEST_MARKER();
\r
1740 #else /* configUSE_QUEUE_SETS */
\r
1742 /* Tasks that are removed from the event list will get added to
\r
1743 the pending ready list as the scheduler is still suspended. */
\r
1744 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1746 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1748 /* The task waiting has a higher priority so record that a
\r
1749 context switch is required. */
\r
1750 vTaskMissedYield();
\r
1754 mtCOVERAGE_TEST_MARKER();
\r
1762 #endif /* configUSE_QUEUE_SETS */
\r
1764 --( pxQueue->xTxLock );
\r
1767 pxQueue->xTxLock = queueUNLOCKED;
\r
1769 taskEXIT_CRITICAL();
\r
1771 /* Do the same for the Rx lock. */
\r
1772 taskENTER_CRITICAL();
\r
1774 while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
\r
1776 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
1778 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
\r
1780 vTaskMissedYield();
\r
1784 mtCOVERAGE_TEST_MARKER();
\r
1787 --( pxQueue->xRxLock );
\r
1795 pxQueue->xRxLock = queueUNLOCKED;
\r
1797 taskEXIT_CRITICAL();
\r
1799 /*-----------------------------------------------------------*/
\r
1801 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
\r
1803 BaseType_t xReturn;
\r
1805 taskENTER_CRITICAL();
\r
1807 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
\r
1813 xReturn = pdFALSE;
\r
1816 taskEXIT_CRITICAL();
\r
1820 /*-----------------------------------------------------------*/
\r
1822 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
\r
1824 BaseType_t xReturn;
\r
1826 configASSERT( xQueue );
\r
1827 if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
\r
1833 xReturn = pdFALSE;
\r
1837 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
\r
1838 /*-----------------------------------------------------------*/
\r
1840 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
\r
1842 BaseType_t xReturn;
\r
1844 taskENTER_CRITICAL();
\r
1846 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
\r
1852 xReturn = pdFALSE;
\r
1855 taskEXIT_CRITICAL();
\r
1859 /*-----------------------------------------------------------*/
\r
1861 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
\r
1863 BaseType_t xReturn;
\r
1865 configASSERT( xQueue );
\r
1866 if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
\r
1872 xReturn = pdFALSE;
\r
1876 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
\r
1877 /*-----------------------------------------------------------*/
\r
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a coroutine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
\r
1954 /*-----------------------------------------------------------*/
\r
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
\r
2044 /*-----------------------------------------------------------*/
\r
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* A co-routine was woken by this post - report that back
						to the caller so only one wake happens per ISR. */
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
\r
2092 /*-----------------------------------------------------------*/
\r
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available. If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			/* Wake at most one co-routine per ISR. */
			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
\r
2152 /*-----------------------------------------------------------*/
\r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );

				/* Slot claimed - stop searching. */
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
\r
2181 /*-----------------------------------------------------------*/
\r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered in actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot if free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;

				/* Found it - no need to search the rest of the registry. */
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
\r
2208 /*-----------------------------------------------------------*/
\r
2210 #if ( configUSE_TIMERS == 1 )
\r
2212 void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )
\r
2214 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
2216 /* This function should not be called by application code hence the
\r
2217 'Restricted' in its name. It is not part of the public API. It is
\r
2218 designed for use by kernel code, and has special calling requirements.
\r
2219 It can result in vListInsert() being called on a list that can only
\r
2220 possibly ever have one item in it, so the list will be fast, but even
\r
2221 so it should be called with the scheduler locked and not from a critical
\r
2224 /* Only do anything if there are no messages in the queue. This function
\r
2225 will not actually cause the task to block, just place it on a blocked
\r
2226 list. It will not block until the scheduler is unlocked - at which
\r
2227 time a yield will be performed. If an item is added to the queue while
\r
2228 the queue is locked, and the calling task blocks on the queue, then the
\r
2229 calling task will be immediately unblocked when the queue is unlocked. */
\r
2230 prvLockQueue( pxQueue );
\r
2231 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
\r
2233 /* There is nothing in the queue, block for the specified period. */
\r
2234 vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
\r
2238 mtCOVERAGE_TEST_MARKER();
\r
2240 prvUnlockQueue( pxQueue );
\r
2243 #endif /* configUSE_TIMERS */
\r
2244 /*-----------------------------------------------------------*/
\r
2246 #if ( configUSE_QUEUE_SETS == 1 )
\r
2248 QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
\r
2250 QueueSetHandle_t pxQueue;
\r
2252 pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
\r
2257 #endif /* configUSE_QUEUE_SETS */
\r
2258 /*-----------------------------------------------------------*/
\r
2260 #if ( configUSE_QUEUE_SETS == 1 )
\r
2262 BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
\r
2264 BaseType_t xReturn;
\r
2266 taskENTER_CRITICAL();
\r
2268 if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
\r
2270 /* Cannot add a queue/semaphore to more than one queue set. */
\r
2273 else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
\r
2275 /* Cannot add a queue/semaphore to a queue set if there are already
\r
2276 items in the queue/semaphore. */
\r
2281 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
\r
2285 taskEXIT_CRITICAL();
\r
2290 #endif /* configUSE_QUEUE_SETS */
\r
2291 /*-----------------------------------------------------------*/
\r
2293 #if ( configUSE_QUEUE_SETS == 1 )
\r
2295 BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
\r
2297 BaseType_t xReturn;
\r
2298 Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
\r
2300 if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
\r
2302 /* The queue was not a member of the set. */
\r
2305 else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
\r
2307 /* It is dangerous to remove a queue from a set when the queue is
\r
2308 not empty because the queue set will still hold pending events for
\r
2314 taskENTER_CRITICAL();
\r
2316 /* The queue is no longer contained in the set. */
\r
2317 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
\r
2319 taskEXIT_CRITICAL();
\r
2324 } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */
\r
2326 #endif /* configUSE_QUEUE_SETS */
\r
2327 /*-----------------------------------------------------------*/
\r
2329 #if ( configUSE_QUEUE_SETS == 1 )
\r
2331 QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
\r
2333 QueueSetMemberHandle_t xReturn = NULL;
\r
2335 ( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
\r
2339 #endif /* configUSE_QUEUE_SETS */
\r
2340 /*-----------------------------------------------------------*/
\r
2342 #if ( configUSE_QUEUE_SETS == 1 )
\r
2344 QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
\r
2346 QueueSetMemberHandle_t xReturn = NULL;
\r
2348 ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
\r
2352 #endif /* configUSE_QUEUE_SETS */
\r
2353 /*-----------------------------------------------------------*/
\r
2355 #if ( configUSE_QUEUE_SETS == 1 )
\r
2357 static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
\r
2359 Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
\r
2360 BaseType_t xReturn = pdFALSE;
\r
2362 /* This function must be called form a critical section. */
\r
2364 configASSERT( pxQueueSetContainer );
\r
2365 configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
\r
2367 if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
\r
2369 traceQUEUE_SEND( pxQueueSetContainer );
\r
2370 /* The data copies is the handle of the queue that contains data. */
\r
2371 prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
\r
2372 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
\r
2374 if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
\r
2376 /* The task waiting has a higher priority */
\r
2381 mtCOVERAGE_TEST_MARKER();
\r
2386 mtCOVERAGE_TEST_MARKER();
\r
2391 mtCOVERAGE_TEST_MARKER();
\r
2397 #endif /* configUSE_QUEUE_SETS */
\r