/*
    FreeRTOS V8.0.1 - Copyright (C) 2014 Real Time Engineers Ltd.

    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

    ***************************************************************************
     *  FreeRTOS provides completely free yet professionally developed,      *
     *  robust, strictly quality controlled, supported, and cross            *
     *  platform software that has become a de facto standard.               *
     *                                                                       *
     *  Help yourself get started quickly and support the FreeRTOS           *
     *  project by purchasing a FreeRTOS tutorial book, reference            *
     *  manual, or both from: http://www.FreeRTOS.org/Documentation          *
    ***************************************************************************

    This file is part of the FreeRTOS distribution.

    FreeRTOS is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License (version 2) as published by the
    Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.

    >>! NOTE: The modification to the GPL is included to allow you to     !<<
    >>! distribute a combined work that includes FreeRTOS without being   !<<
    >>! obliged to provide the source code for proprietary components     !<<
    >>! outside of the FreeRTOS kernel.                                   !<<

    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE.  Full license text is available from the following
    link: http://www.freertos.org/a00114.html

    ***************************************************************************
     *  Having a problem?  Start by reading the FAQ "My application does     *
     *  not run, what could be wrong?"                                       *
     *                                                                       *
     *  http://www.FreeRTOS.org/FAQHelp.html                                 *
    ***************************************************************************

    http://www.FreeRTOS.org - Documentation, books, training, latest versions,
    license and Real Time Engineers Ltd. contact details.

    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
    compatible FAT file system, and our tiny thread aware UDP/IP stack.

    http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High
    Integrity Systems to sell under the OpenRTOS brand.  Low cost OpenRTOS
    licenses offer ticketed support, indemnification and middleware.

    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
    engineered and independently SIL3 certified version for use in safety and
    mission critical applications that require provable dependability.
*/

#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
	#include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */

/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED					( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH	( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME			( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.
 */
typedef struct QueueDefinition
{
	int8_t *pcHead;					/*< Points to the beginning of the queue storage area. */
	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this is used as a marker. */
	int8_t *pcWriteTo;				/*< Points to the next free place in the storage area. */

	union							/* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
	{
		int8_t *pcReadFrom;			/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
		UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
	} u;

	List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
	List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

	volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
	UBaseType_t uxItemSize;			/*< The size of each item that the queue will hold. */

	volatile BaseType_t xRxLock;	/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
	volatile BaseType_t xTxLock;	/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

	#if ( configUSE_TRACE_FACILITY == 1 )
		UBaseType_t uxQueueNumber;
		uint8_t ucQueueType;
	#endif

	#if ( configUSE_QUEUE_SETS == 1 )
		struct QueueDefinition *pxQueueSetContainer;
	#endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} xQueueRegistryItem;

	/* The old xQueueRegistryItem name is maintained above then typedefed to the
	new QueueRegistryItem_t name below to enable the use of older kernel aware
	debuggers. */
	typedef xQueueRegistryItem QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
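
/* Usage note (editorial, illustrative only):  the registry is populated by the
application, typically right after a queue or semaphore is created, via the
vQueueAddToRegistry() and vQueueUnregisterQueue() functions declared in queue.h.
A minimal sketch, assuming configQUEUE_REGISTRY_SIZE is at least 1 and that
xTxQueue is a handle created elsewhere:

	vQueueAddToRegistry( xTxQueue, "TxQueue" );

	vQueueUnregisterQueue( xTxQueue );
*/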
\r

/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
	/*
	 * Checks to see if a queue is a member of a queue set, and if so, notifies
	 * the queue set that the queue contains data.
	 */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL();									\
	{														\
		if( ( pxQueue )->xRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->xTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
		pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
		pxQueue->pcWriteTo = pxQueue->pcHead;
		pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
		pxQueue->xRxLock = queueUNLOCKED;
		pxQueue->xTxLock = queueUNLOCKED;

		if( xNewQueue == pdFALSE )
		{
			/* If there are tasks blocked waiting to read from the queue, then
			the tasks will remain blocked as after this function exits the queue
			will still be empty.  If there are tasks blocked waiting to write to
			the queue, then one should be unblocked as after this function exits
			it will be possible to write to it. */
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
				{
					queueYIELD_IF_USING_PREEMPTION();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			/* Ensure the event queues start in the correct state. */
			vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
		}
	}
	taskEXIT_CRITICAL();

	/* A value is returned for calling semantic consistency with previous
	versions. */
	return pdPASS;
}
/*-----------------------------------------------------------*/
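
/* Usage note (editorial, illustrative only):  application code does not
normally call xQueueGenericReset() directly; queue.h provides the xQueueReset()
macro, which passes pdFALSE for xNewQueue so that one task blocked waiting to
send, if any, is unblocked once the queue has been emptied.  A sketch, assuming
a previously created handle xQueue:

	( void ) xQueueReset( xQueue );
*/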
\r

QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
QueueHandle_t xReturn = NULL;

	/* Remove compiler warnings about unused parameters should
	configUSE_TRACE_FACILITY not be set to 1. */
	( void ) ucQueueType;

	/* Allocate the new queue structure. */
	if( uxQueueLength > ( UBaseType_t ) 0 )
	{
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
		if( pxNewQueue != NULL )
		{
			/* Create the list of pointers to queue items.  The queue is one byte
			longer than asked for to make wrap checking easier/faster. */
			xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

			pxNewQueue->pcHead = ( int8_t * ) pvPortMalloc( xQueueSizeInBytes );
			if( pxNewQueue->pcHead != NULL )
			{
				/* Initialise the queue members as described above where the
				queue type is defined. */
				pxNewQueue->uxLength = uxQueueLength;
				pxNewQueue->uxItemSize = uxItemSize;
				( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

				#if ( configUSE_TRACE_FACILITY == 1 )
				{
					pxNewQueue->ucQueueType = ucQueueType;
				}
				#endif /* configUSE_TRACE_FACILITY */

				#if( configUSE_QUEUE_SETS == 1 )
				{
					pxNewQueue->pxQueueSetContainer = NULL;
				}
				#endif /* configUSE_QUEUE_SETS */

				traceQUEUE_CREATE( pxNewQueue );
				xReturn = pxNewQueue;
			}
			else
			{
				traceQUEUE_CREATE_FAILED( ucQueueType );
				vPortFree( pxNewQueue );
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}

	configASSERT( xReturn );

	return xReturn;
}
/*-----------------------------------------------------------*/
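
/* Usage note (editorial, illustrative only):  application code normally
reaches this function through the xQueueCreate() macro in queue.h, which
supplies the queue type for it.  NULL is returned if either of the two
allocations above (the Queue_t structure or the storage area) fails.  A minimal
sketch creating a queue able to hold ten uint32_t values:

	QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );

	configASSERT( xQueue );
*/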
\r

#if ( configUSE_MUTEXES == 1 )

	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		/* Allocate the new queue structure. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
		if( pxNewQueue != NULL )
		{
			/* Information required for priority inheritance. */
			pxNewQueue->pxMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* When a queue is used as a mutex no data is actually copied into
			or out of the queue. */
			pxNewQueue->pcWriteTo = NULL;
			pxNewQueue->u.pcReadFrom = NULL;

			/* Each mutex has a length of 1 (like a binary semaphore) and
			an item size of 0 as nothing is actually copied into or out
			of the mutex. */
			pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
			pxNewQueue->uxLength = ( UBaseType_t ) 1U;
			pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
			pxNewQueue->xRxLock = queueUNLOCKED;
			pxNewQueue->xTxLock = queueUNLOCKED;

			#if ( configUSE_TRACE_FACILITY == 1 )
			{
				pxNewQueue->ucQueueType = ucQueueType;
			}
			#endif

			#if ( configUSE_QUEUE_SETS == 1 )
			{
				pxNewQueue->pxQueueSetContainer = NULL;
			}
			#endif

			/* Ensure the event queues start with the correct state. */
			vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state.  Preload the
			mutex held count as calling xQueueGenericSend() will decrement the
			count back to 0. */
			vTaskIncrementMutexHeldCount();
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			traceCREATE_MUTEX_FAILED();
		}

		configASSERT( pxNewQueue );
		return pxNewQueue;
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
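
/* Usage note (editorial, illustrative only):  this function is normally
reached through the xSemaphoreCreateMutex() macro in semphr.h rather than
called directly.  The mutex is created in the 'given' state, so the first take
succeeds immediately; the guarded resource would be accessed between the take
and the give.  A sketch:

	SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();

	if( xSemaphoreTake( xMutex, portMAX_DELAY ) == pdPASS )
	{
		( void ) xSemaphoreGive( xMutex );
	}
*/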
\r

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
	{
	void *pxReturn;

		/* This function is called by xSemaphoreGetMutexHolder(), and should not
		be called directly.  Note:  This is a good way of determining if the
		calling task is the mutex holder, but not a good way of determining the
		identity of the mutex holder, as the holder may change between the
		following critical section exiting and the function returning. */
		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
			}
			else
			{
				pxReturn = NULL;
			}
		}
		taskEXIT_CRITICAL();

		return pxReturn;
	}

#endif
/*-----------------------------------------------------------*/
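
/* Usage note (editorial, illustrative only):  the intended entry point is the
xSemaphoreGetMutexHolder() macro in semphr.h (available when
INCLUDE_xSemaphoreGetMutexHolder is 1).  As the comment above explains, the
result is only reliable for answering "is the calling task the holder?".  A
sketch, assuming xMutex was just taken by the calling task:

	void *pvHolder;

	pvHolder = xSemaphoreGetMutexHolder( xMutex );
	configASSERT( pvHolder == ( void * ) xTaskGetCurrentTaskHandle() );
*/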
\r

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then pxMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		pxMutexHolder can never coincidentally equal the task's handle, and as
		this is the only condition we are interested in it does not matter if
		pxMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the pxMutexHolder variable. */
		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.uxRecursiveCallCount )--;

			/* Have we unwound the call count? */
			if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			/* The mutex cannot be given because the calling task is not the
			holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* Comments regarding mutual exclusion as per those within
		xQueueGiveMutexRecursive(). */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
		{
			( pxMutex->u.uxRecursiveCallCount )++;
			xReturn = pdPASS;
		}
		else
		{
			xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

			/* pdPASS will only be returned if the mutex was successfully
			obtained.  The calling task may have entered the Blocked state
			before reaching here. */
			if( xReturn == pdPASS )
			{
				( pxMutex->u.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
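
/* Usage note (editorial, illustrative only):  the recursive pair above is
normally used through the semphr.h macros.  The holding task may take the mutex
several times, and must give it back the same number of times before it becomes
available to other tasks.  A sketch, assuming configUSE_RECURSIVE_MUTEXES is 1:

	SemaphoreHandle_t xRecMutex = xSemaphoreCreateRecursiveMutex();

	if( xSemaphoreTakeRecursive( xRecMutex, ( TickType_t ) 10 ) == pdPASS )
	{
		( void ) xSemaphoreTakeRecursive( xRecMutex, ( TickType_t ) 10 );
		( void ) xSemaphoreGiveRecursive( xRecMutex );
		( void ) xSemaphoreGiveRecursive( xRecMutex );
	}
*/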
\r

#if ( configUSE_COUNTING_SEMAPHORES == 1 )

	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		configASSERT( xHandle );
		return xHandle;
	}

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/
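
/* Usage note (editorial, illustrative only):  normally reached through the
xSemaphoreCreateCounting() macro in semphr.h.  Because a semaphore has an item
size of zero, the count lives directly in uxMessagesWaiting, which is why the
initial count can simply be written into the structure above.  A sketch of a
semaphore that counts up to five resources, all available initially:

	SemaphoreHandle_t xCounting = xSemaphoreCreateCounting( 5, 5 );
*/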
\r

BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there room on the queue now?  The running task must be
			the highest priority task wanting to access the queue.  If
			the head item in the queue is to be overwritten then it does
			not matter if the queue is full. */
			if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
			{
				traceQUEUE_SEND( pxQueue );
				xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock.  A context switch is required. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* If there was a task waiting for data to arrive on the
						queue then unblock it now. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
							{
								/* The unblocked task has a priority higher than
								our own so yield immediately.  Yes it is ok to
								do this from within the critical section - the
								kernel takes care of that. */
								queueYIELD_IF_USING_PREEMPTION();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else if( xYieldRequired != pdFALSE )
						{
							/* This path is a special case that will only get
							executed if the task was holding multiple mutexes
							and the mutexes were given back in an order that is
							different to that in which they were taken. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately.  Yes it is ok to do
							this from within the critical section - the kernel
							takes care of that. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else if( xYieldRequired != pdFALSE )
					{
						/* This path is a special case that will only get
						executed if the task was holding multiple mutexes and
						the mutexes were given back in an order that is
						different to that in which they were taken. */
						queueYIELD_IF_USING_PREEMPTION();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was full and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();

					/* Return to the original privilege level before exiting
					the function. */
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was full and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

				/* Unlocking the queue means queue events can affect the
				event list.  It is possible that interrupts occurring now
				remove this task from the event list again - but as the
				scheduler is suspended the task will go onto the pending
				ready list instead of the actual ready list. */
				prvUnlockQueue( pxQueue );

				/* Resuming the scheduler will move tasks from the pending
				ready list into the ready list - so it is feasible that this
				task is already in a ready list before it yields - in which
				case the yield will not cause a context switch unless there
				is also a higher priority task in the pending ready list. */
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
			}
			else
			{
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The timeout has expired. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			/* Return to the original privilege level before exiting the
			function. */
			traceQUEUE_SEND_FAILED( pxQueue );
			return errQUEUE_FULL;
		}
	}
}
/*-----------------------------------------------------------*/
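
/* Usage note (editorial, illustrative only):  queue.h wraps this function with
the xQueueSend(), xQueueSendToBack(), xQueueSendToFront() and xQueueOverwrite()
macros, which differ only in the xCopyPosition argument (and, for
xQueueOverwrite(), in requiring a queue of length one).  A sketch, assuming the
xQueue handle from the earlier example; xStatus is pdPASS on success, or
errQUEUE_FULL if the queue stayed full for the whole 100 tick block time:

	uint32_t ulValueToSend = 10UL;
	BaseType_t xStatus;

	xStatus = xQueueSend( xQueue, &ulValueToSend, ( TickType_t ) 100 );
*/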
\r

#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				/* Is there room on the queue now?  To be running we must be
				the highest priority task wanting to access the queue. */
				if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
				{
					traceQUEUE_SEND( pxQueue );
					prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately. */
							portYIELD_WITHIN_API();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						taskEXIT_CRITICAL();
						return errQUEUE_FULL;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueFull( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_SEND( pxQueue );
						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					taskEXIT_CRITICAL();
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
			}
			taskEXIT_CRITICAL();
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	int8_t *pcOriginalReadPosition;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
				{
					/* Remember our read position in case we are just peeking. */
					pcOriginalReadPosition = pxQueue->u.pcReadFrom;

					prvCopyDataFromQueue( pxQueue, pvBuffer );

					if( xJustPeeking == pdFALSE )
					{
						traceQUEUE_RECEIVE( pxQueue );

						/* Data is actually being removed (not just peeked). */
						--( pxQueue->uxMessagesWaiting );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Record the information required to implement
								priority inheritance should it become necessary. */
								pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
							{
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
					}
					else
					{
						traceQUEUE_PEEK( pxQueue );

						/* We are not removing the data, so reset our read
						pointer. */
						pxQueue->u.pcReadFrom = pcOriginalReadPosition;

						/* The data is being left in the queue, so see if there are
						any other tasks waiting for the data. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							/* Tasks that are removed from the event list will get added to
							the pending ready list as the scheduler is still suspended. */
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority than this task. */
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						taskEXIT_CRITICAL();
						traceQUEUE_RECEIVE_FAILED( pxQueue );
						return errQUEUE_EMPTY;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								taskENTER_CRITICAL();
								{
									vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
								}
								taskEXIT_CRITICAL();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
			}
			taskEXIT_CRITICAL();
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	/* Similar to xQueueGenericSend, except without blocking if there is no room
	in the queue.  Also don't directly wake a task that was blocked on a queue
	read, instead return a flag to say whether a context switch is required or
	not (i.e. has a task with a higher priority than us been woken by this
	post). */
	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
		{
			traceQUEUE_SEND_FROM_ISR( pxQueue );

			if( prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ) != pdFALSE )
			{
				/* This is a special case that can only be executed if a task
				holds multiple mutexes and then gives the mutexes back in an
				order that is different to that in which they were taken. */
				if( pxHigherPriorityTaskWoken != NULL )
				{
					*pxHigherPriorityTaskWoken = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}

			/* The event list is not altered if the queue is locked.  This will
			be done when the queue is unlocked later. */
			if( pxQueue->xTxLock == queueUNLOCKED )
			{
				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock.  A context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority so record that a
								context switch is required. */
								if( pxHigherPriorityTaskWoken != NULL )
								{
									*pxHigherPriorityTaskWoken = pdTRUE;
								}
								else
								{
									mtCOVERAGE_TEST_MARKER();
								}
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was posted while it was locked. */
				++( pxQueue->xTxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
			xReturn = errQUEUE_FULL;
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/
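
/* Usage note (editorial, illustrative only):  a typical interrupt handler
posts with xQueueSendFromISR() (a queue.h macro around this function) and then
requests a context switch if a higher priority task was woken.  The name of the
final yield macro is port specific; portYIELD_FROM_ISR() is shown here as used
by many ports, but the port documentation should be checked.  Sketch:

	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
	uint32_t ulReceivedByte = 0;

	( void ) xQueueSendFromISR( xQueue, &ulReceivedByte, &xHigherPriorityTaskWoken );
	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
*/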
\r

BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there data in the queue now?  To be running we must be
			the highest priority task wanting to access the queue. */
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Remember the read position in case the queue is only being
				peeked. */
				pcOriginalReadPosition = pxQueue->u.pcReadFrom;

				prvCopyDataFromQueue( pxQueue, pvBuffer );

				if( xJustPeeking == pdFALSE )
				{
					traceQUEUE_RECEIVE( pxQueue );

					/* Actually removing data, not just peeking. */
					--( pxQueue->uxMessagesWaiting );

					#if ( configUSE_MUTEXES == 1 )
					{
						if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
						{
							/* Record the information required to implement
							priority inheritance should it become necessary. */
							pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					#endif

					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
						{
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					traceQUEUE_PEEK( pxQueue );

					/* The data is not being removed, so reset the read
					pointer. */
					pxQueue->u.pcReadFrom = pcOriginalReadPosition;

					/* The data is being left in the queue, so see if there are
					any other tasks waiting for the data. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						/* Tasks that are removed from the event list will get added to
						the pending ready list as the scheduler is still suspended. */
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority than this task. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was empty and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was empty and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

				#if ( configUSE_MUTEXES == 1 )
				{
					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
					{
						taskENTER_CRITICAL();
						{
							vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
						}
						taskEXIT_CRITICAL();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif

				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
				prvUnlockQueue( pxQueue );
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();
			traceQUEUE_RECEIVE_FAILED( pxQueue );
			return errQUEUE_EMPTY;
		}
	}
}
/*-----------------------------------------------------------*/
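
/* Usage note (editorial, illustrative only):  queue.h wraps this function with
xQueueReceive() (xJustPeeking = pdFALSE, the item is removed) and xQueuePeek()
(xJustPeeking = pdTRUE, the item is left on the queue).  A sketch that blocks
for up to 100 ticks waiting for data from the earlier xQueue handle:

	uint32_t ulReceivedValue;

	if( xQueueReceive( xQueue, &ulReceivedValue, ( TickType_t ) 100 ) == pdPASS )
	{
		( void ) ulReceivedValue;
	}
*/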
\r

BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

			prvCopyDataFromQueue( pxQueue, pvBuffer );
			--( pxQueue->uxMessagesWaiting );

			/* If the queue is locked the event list will not be modified.
			Instead update the lock count so the task that unlocks the queue
			will know that an ISR has removed data while the queue was
			locked. */
			if( pxQueue->xRxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority than us so
						force a context switch. */
						if( pxHigherPriorityTaskWoken != NULL )
						{
							*pxHigherPriorityTaskWoken = pdTRUE;
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was removed while it was locked. */
				++( pxQueue->xRxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_PEEK_FROM_ISR( pxQueue );

			/* Remember the read position so it can be reset as nothing is
			actually being removed from the queue. */
			pcOriginalReadPosition = pxQueue->u.pcReadFrom;
			prvCopyDataFromQueue( pxQueue, pvBuffer );
			pxQueue->u.pcReadFrom = pcOriginalReadPosition;

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

	configASSERT( xQueue );

	taskENTER_CRITICAL();
	{
		uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
	}
	taskEXIT_CRITICAL();

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t *pxQueue;

	pxQueue = ( Queue_t * ) xQueue;
	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
	}
	taskEXIT_CRITICAL();

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

	configASSERT( xQueue );
	uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	traceQUEUE_DELETE( pxQueue );
	#if ( configQUEUE_REGISTRY_SIZE > 0 )
	{
		vQueueUnregisterQueue( pxQueue );
	}
	#endif

	if( pxQueue->pcHead != NULL )
	{
		vPortFree( pxQueue->pcHead );
	}
	vPortFree( pxQueue );
}
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
	{
		( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->ucQueueType;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;

	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				vTaskDecrementMutexHeldCount();
				xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
				pxQueue->pxMutexHolder = NULL;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize;
		if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		if( xPosition == queueOVERWRITE )
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--( pxQueue->uxMessagesWaiting );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

	++( pxQueue->uxMessagesWaiting );

	return xReturn;
}
/*-----------------------------------------------------------*/

static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
	if( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX )
	{
		pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
	}
	else
	{
		/* A mutex was taken. */
		vTaskIncrementMutexHeldCount();
	}
}
/*-----------------------------------------------------------*/

static void prvUnlockQueue( Queue_t * const pxQueue )
{
	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

	/* The lock counts contain the number of extra data items placed or
	removed from the queue while the queue was locked.  When a queue is
	locked items can be added or removed, but the event lists cannot be
	updated. */
	taskENTER_CRITICAL();
	{
		/* See if data was added to the queue while it was locked. */
		while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
		{
			/* Data was posted while the queue was locked.  Are any tasks
			blocked waiting for data to become available? */
			#if ( configUSE_QUEUE_SETS == 1 )
			{
				if( pxQueue->pxQueueSetContainer != NULL )
				{
					if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
					{
						/* The queue is a member of a queue set, and posting to
						the queue set caused a higher priority task to unblock.
						A context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* Tasks that are removed from the event list will get added to
					the pending ready list as the scheduler is still suspended. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							vTaskMissedYield();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						break;
					}
				}
			}
			#else /* configUSE_QUEUE_SETS */
			{
				/* Tasks that are removed from the event list will get added to
				the pending ready list as the scheduler is still suspended. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority so record that a
						context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					break;
				}
			}
			#endif /* configUSE_QUEUE_SETS */

			--( pxQueue->xTxLock );
		}

		pxQueue->xTxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();

	/* Do the same for the Rx lock. */
	taskENTER_CRITICAL();
	{
		while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
		{
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					vTaskMissedYield();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				--( pxQueue->xRxLock );
			}
			else
			{
				break;
			}
		}

		pxQueue->xRxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a coroutine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
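/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * kernel source): application code would normally reach xQueueCRSend() and
 * xQueueCRReceive() through the crQUEUE_SEND() and crQUEUE_RECEIVE() macros
 * from croutine.h, from within a co-routine created with xCoRoutineCreate().
 * The queue handle, co-routine names and block times below are hypothetical.
 * The sketch is excluded from the build.
 */
#if 0
	static QueueHandle_t xExampleQueue = NULL;	/* Hypothetical - created elsewhere with xQueueCreate(). */

	static void prvExampleSenderCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
	{
	/* Co-routine locals must be static so their values survive a block. */
	static uint32_t ulValueToSend = 0;
	static BaseType_t xResult;

		crSTART( xHandle );

		for( ;; )
		{
			/* crQUEUE_SEND() wraps xQueueCRSend(), yielding the co-routine if
			the queue is full and a block time was specified. */
			crQUEUE_SEND( xHandle, xExampleQueue, &ulValueToSend, ( TickType_t ) 10, &xResult );

			if( xResult == pdPASS )
			{
				ulValueToSend++;
			}
		}

		crEND();
	}

	static void prvExampleReceiverCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
	{
	static uint32_t ulReceivedValue;
	static BaseType_t xResult;

		crSTART( xHandle );

		for( ;; )
		{
			/* crQUEUE_RECEIVE() wraps xQueueCRReceive(). */
			crQUEUE_RECEIVE( xHandle, xExampleQueue, &ulReceivedValue, ( TickType_t ) 10, &xResult );

			if( xResult == pdPASS )
			{
				/* Process ulReceivedValue here. */
			}
		}

		crEND();
	}
#endif /* 0 - illustrative sketch only */
/*-----------------------------------------------------------*/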
\r
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available.  If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
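/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * kernel source): an interrupt handler would normally call
 * xQueueCRSendFromISR() and xQueueCRReceiveFromISR() through the
 * crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() macros from
 * croutine.h.  The hardware access functions and queue handle below are
 * hypothetical.  The sketch is excluded from the build.
 */
#if 0
	extern BaseType_t xSerialDataAvailable( void );	/* Hypothetical hardware query. */
	extern char cSerialGetChar( void );				/* Hypothetical hardware read. */
	extern QueueHandle_t xCommsRxQueue;				/* Hypothetical queue of char. */

	void vExampleRxISR( void )
	{
	char cRxedChar;
	BaseType_t xCoRoutineWoken = pdFALSE;

		while( xSerialDataAvailable() != pdFALSE )
		{
			cRxedChar = cSerialGetChar();

			/* Post the received character to the queue.  The return value is
			fed back in so that at most one co-routine is woken per interrupt,
			as described in xQueueCRSendFromISR() above. */
			xCoRoutineWoken = crQUEUE_SEND_FROM_ISR( xCommsRxQueue, &cRxedChar, xCoRoutineWoken );
		}
	}
#endif /* 0 - illustrative sketch only */
/*-----------------------------------------------------------*/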
\r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered is actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot is free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

	} /*lint !e818 xQueue could not be a pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
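/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * kernel source): registering a queue gives it a human readable name in a
 * kernel aware debugger, and the registry slot should be freed again before
 * the queue is deleted.  The queue handle below is hypothetical.  The sketch
 * is excluded from the build.
 */
#if 0
	extern QueueHandle_t xCommandQueue;	/* Hypothetical - created with xQueueCreate(). */

	void vExampleUseQueueRegistry( void )
	{
		/* Make the queue visible by name to debug tools. */
		vQueueAddToRegistry( xCommandQueue, "CommandQueue" );

		/* ... use the queue ... */

		/* Free the registry slot before deleting the queue. */
		vQueueUnregisterQueue( xCommandQueue );
		vQueueDelete( xCommandQueue );
	}
#endif /* 0 - illustrative sketch only */
/*-----------------------------------------------------------*/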
\r
#if ( configUSE_TIMERS == 1 )

	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/
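/*
 * Illustrative calling pattern (editor's addition, not part of the original
 * kernel source): vQueueWaitForMessageRestricted() is intended for kernel
 * code such as the timer service task.  The sketch below shows, roughly, the
 * pattern of calling it with the scheduler suspended and then resuming the
 * scheduler so the block takes effect.  The queue handle and function name
 * are hypothetical.  The sketch is excluded from the build.
 */
#if 0
	extern QueueHandle_t xKernelCommandQueue;	/* Hypothetical kernel-owned queue. */

	static void prvExampleRestrictedWait( TickType_t xBlockTime )
	{
		vTaskSuspendAll();
		{
			/* Place the calling task on the queue's event list without
			actually blocking - the block only takes effect once the
			scheduler is resumed. */
			vQueueWaitForMessageRestricted( xKernelCommandQueue, xBlockTime );

			if( xTaskResumeAll() == pdFALSE )
			{
				/* xTaskResumeAll() did not perform a yield, so request one
				explicitly to allow the block to occur. */
				portYIELD_WITHIN_API();
			}
		}
	}
#endif /* 0 - illustrative sketch only */
/*-----------------------------------------------------------*/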
\r
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xReturn = pdFAIL;
			}
			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xReturn = pdFAIL;
			}
			else
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
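/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * kernel source): creating a queue set, adding two empty queues to it, then
 * blocking on the set and reading from whichever member holds data.  The
 * queue lengths and task name are hypothetical.  The sketch is excluded from
 * the build.
 */
#if 0
	static void prvExampleQueueSetTask( void *pvParameters )
	{
	QueueHandle_t xQueue1, xQueue2;
	QueueSetHandle_t xQueueSet;
	QueueSetMemberHandle_t xActivatedMember;
	uint32_t ulReceived;

		( void ) pvParameters;

		/* Create two queues that each hold up to 10 uint32_t values. */
		xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
		xQueue2 = xQueueCreate( 10, sizeof( uint32_t ) );

		/* The set must be able to hold one event for every space in every
		member queue - here 10 + 10.  Queues must be empty when added, as
		enforced by xQueueAddToSet() above. */
		xQueueSet = xQueueCreateSet( 10 + 10 );
		( void ) xQueueAddToSet( xQueue1, xQueueSet );
		( void ) xQueueAddToSet( xQueue2, xQueueSet );

		for( ;; )
		{
			/* Block until one of the member queues contains data, then read
			from the member that was reported.  A block time of 0 can be used
			on the receive because the select already guarantees data. */
			xActivatedMember = xQueueSelectFromSet( xQueueSet, portMAX_DELAY );

			if( xActivatedMember == xQueue1 )
			{
				( void ) xQueueReceive( xQueue1, &ulReceived, 0 );
			}
			else if( xActivatedMember == xQueue2 )
			{
				( void ) xQueueReceive( xQueue2, &ulReceived, 0 );
			}
		}
	}
#endif /* 0 - illustrative sketch only */
/*-----------------------------------------------------------*/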
\r
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called from a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );
			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
				{
					/* The task waiting has a higher priority. */
					xReturn = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */