/*
    FreeRTOS V8.2.2 - Copyright (C) 2015 Real Time Engineers Ltd.

    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

    This file is part of the FreeRTOS distribution.

    FreeRTOS is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License (version 2) as published by the
    Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.

    ***************************************************************************
    >>!   NOTE: The modification to the GPL is included to allow you to     !<<
    >>!   distribute a combined work that includes FreeRTOS without being   !<<
    >>!   obliged to provide the source code for proprietary components     !<<
    >>!   outside of the FreeRTOS kernel.                                   !<<
    ***************************************************************************

    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE.  Full license text is available on the following
    link: http://www.freertos.org/a00114.html

    ***************************************************************************
     *                                                                       *
     *    FreeRTOS provides completely free yet professionally developed,    *
     *    robust, strictly quality controlled, supported, and cross          *
     *    platform software that is more than just the market leader, it     *
     *    is the industry's de facto standard.                               *
     *                                                                       *
     *    Help yourself get started quickly while simultaneously helping     *
     *    to support the FreeRTOS project by purchasing a FreeRTOS           *
     *    tutorial book, reference manual, or both:                          *
     *    http://www.FreeRTOS.org/Documentation                              *
     *                                                                       *
    ***************************************************************************

    http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
    the FAQ page "My application does not run, what could be wrong?".  Have you
    defined configASSERT()?

    http://www.FreeRTOS.org/support - In return for receiving this top quality
    embedded software for free we request you assist our global community by
    participating in the support forum.

    http://www.FreeRTOS.org/training - Investing in training allows your team
    to be as productive as possible as early as possible.  Now you can receive
    FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
    Ltd, and the world's leading authority on the world's leading RTOS.

    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
    compatible FAT file system, and our tiny thread aware UDP/IP stack.

    http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
    Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.

    http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
    Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
    licenses offer ticketed support, indemnification and commercial middleware.

    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
    engineered and independently SIL3 certified version for use in safety and
    mission critical applications that require provable dependability.
*/
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED                    ( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED           ( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex the pcHead and pcTail pointers
are not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder                    pcTail
#define uxQueueType                      pcHead
#define queueQUEUE_IS_MUTEX              NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME       ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
    int8_t *pcHead;                 /*< Points to the beginning of the queue storage area. */
    int8_t *pcTail;                 /*< Points to the byte at the end of the queue storage area.  Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
    int8_t *pcWriteTo;              /*< Points to the next free place in the storage area. */

    union                           /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
    {
        int8_t *pcReadFrom;         /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
        UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
    } u;

    List_t xTasksWaitingToSend;     /*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;  /*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
    UBaseType_t uxLength;           /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;         /*< The size of each item that the queue will hold. */

    volatile BaseType_t xRxLock;    /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile BaseType_t xTxLock;    /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition *pxQueueSetContainer;
    #endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;
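
/* Illustrative sketch (not part of this file): because items are queued by
copy, not reference, the sender's variable can be reused or go out of scope
once the call returns.  The xQueueCreate()/xQueueSend()/xQueueReceive() macros
used below are the standard public wrappers around the xQueueGenericCreate(),
xQueueGenericSend() and xQueueGenericReceive() functions defined in this file.

    uint32_t ulValueToSend = 10UL, ulReceivedValue;
    QueueHandle_t xQueue;

    // Create a queue capable of holding five uint32_t values.
    xQueue = xQueueCreate( 5, sizeof( uint32_t ) );

    if( xQueue != NULL )
    {
        // The value is copied into the queue storage area...
        ( void ) xQueueSend( xQueue, &ulValueToSend, portMAX_DELAY );

        // ...and copied back out into the receiver's own buffer.
        ( void ) xQueueReceive( xQueue, &ulReceivedValue, portMAX_DELAY );
    }
*/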
/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to
    the new QueueRegistryItem_t name below to enable the use of older kernel
    aware debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
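
/* Illustrative sketch (not part of this file): vQueueAddToRegistry() and
vQueueUnregisterQueue() are the public functions that populate and vacate
xQueueRegistry entries, e.g.:

    QueueHandle_t xQueue = xQueueCreate( 5, sizeof( uint32_t ) );

    // The name is only used by kernel aware debuggers.
    vQueueAddToRegistry( xQueue, "MsgQueue" );
*/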
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
    /*
     * Checks to see if a queue is a member of a queue set, and if so, notifies
     * the queue set that the queue contains data.
     */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                             \
    taskENTER_CRITICAL();                                   \
    {                                                       \
        if( ( pxQueue )->xRxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
        if( ( pxQueue )->xTxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
    }                                                       \
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
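
/* Usage sketch (illustrative only): the blocking send/receive paths below
pair this macro with a scheduler suspension, so the decision to block and the
placing of the task on an event list cannot be broken up by a context switch:

    vTaskSuspendAll();
    prvLockQueue( pxQueue );
    {
        // Inspect the queue and, if necessary, block on an event list...
    }
    prvUnlockQueue( pxQueue );
    ( void ) xTaskResumeAll();
*/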
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
        pxQueue->xRxLock = queueUNLOCKED;
        pxQueue->xTxLock = queueUNLOCKED;

        if( xNewQueue == pdFALSE )
        {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                {
                    queueYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Ensure the event queues start in the correct state. */
            vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
/*-----------------------------------------------------------*/
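
/* Illustrative sketch (not part of this file): application code normally
reaches this function through the xQueueReset() macro, which passes pdFALSE
as xNewQueue so that a task blocked waiting to send is given the chance to
run once the queue is emptied:

    xQueueReset( xQueue );  // Equivalent to xQueueGenericReset( xQueue, pdFALSE ).
*/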
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
QueueHandle_t xReturn = NULL;

    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* There is not going to be a queue storage area. */
        xQueueSizeInBytes = ( size_t ) 0;
    }
    else
    {
        /* The queue is one byte longer than asked for to make wrap checking
        easier/faster. */
        xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
    }

    /* Allocate the new queue structure and storage area. */
    pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

    if( pxNewQueue != NULL )
    {
        if( uxItemSize == ( UBaseType_t ) 0 )
        {
            /* No RAM was allocated for the queue storage area, but pcHead
            cannot be set to NULL because NULL is used as a key to say the queue
            is used as a mutex.  Therefore just set pcHead to point to the queue
            as a benign value that is known to be within the memory map. */
            pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
        }
        else
        {
            /* Jump past the queue structure to find the location of the queue
            storage area. */
            pxNewQueue->pcHead = ( ( int8_t * ) pxNewQueue ) + sizeof( Queue_t );
        }

        /* Initialise the queue members as described above where the queue type
        is defined. */
        pxNewQueue->uxLength = uxQueueLength;
        pxNewQueue->uxItemSize = uxItemSize;
        ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

        #if ( configUSE_TRACE_FACILITY == 1 )
        {
            pxNewQueue->ucQueueType = ucQueueType;
        }
        #endif /* configUSE_TRACE_FACILITY */

        #if( configUSE_QUEUE_SETS == 1 )
        {
            pxNewQueue->pxQueueSetContainer = NULL;
        }
        #endif /* configUSE_QUEUE_SETS */

        traceQUEUE_CREATE( pxNewQueue );
        xReturn = pxNewQueue;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    configASSERT( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
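
/* Illustrative sketch (not part of this file): a single pvPortMalloc() block
holds the Queue_t structure followed immediately by the storage area.  For a
queue created with

    QueueHandle_t xQueue = xQueueCreate( 3, sizeof( uint32_t ) );

the allocation is sizeof( Queue_t ) + ( 3 * 4 ) + 1 bytes, with pcHead
pointing just past the structure and pcTail marking the byte after the last
item slot (the extra byte makes the wrap checks simpler). */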
#if ( configUSE_MUTEXES == 1 )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        /* Allocate the new queue structure. */
        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
        if( pxNewQueue != NULL )
        {
            /* Information required for priority inheritance. */
            pxNewQueue->pxMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* When the queue is used as a mutex no data is actually copied
            into or out of the queue. */
            pxNewQueue->pcWriteTo = NULL;
            pxNewQueue->u.pcReadFrom = NULL;

            /* Each mutex has a length of 1 (like a binary semaphore) and
            an item size of 0 as nothing is actually copied into or out
            of the mutex. */
            pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
            pxNewQueue->uxLength = ( UBaseType_t ) 1U;
            pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
            pxNewQueue->xRxLock = queueUNLOCKED;
            pxNewQueue->xTxLock = queueUNLOCKED;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                pxNewQueue->ucQueueType = ucQueueType;
            }
            #endif

            #if ( configUSE_QUEUE_SETS == 1 )
            {
                pxNewQueue->pxQueueSetContainer = NULL;
            }
            #endif

            /* Ensure the event queues start with the correct state. */
            vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }

        configASSERT( pxNewQueue );
        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
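
/* Illustrative sketch (not part of this file): application code creates and
uses a mutex through the semaphore API macros, which resolve to the queue
functions in this file:

    SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();

    if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 10 ) ) == pdPASS )
    {
        // Access the protected resource, then release it.
        ( void ) xSemaphoreGive( xMutex );
    }
*/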
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
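
/* Illustrative sketch (not part of this file): as the comment above notes,
the reliable use of this function is testing whether the CALLING task is the
holder:

    if( xSemaphoreGetMutexHolder( xMutex ) == xTaskGetCurrentTaskHandle() )
    {
        // This task holds the mutex and can safely give it back.
    }
*/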
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then pxMutexHolder will not
        change outside of this task.  If this task does not hold the mutex then
        pxMutexHolder can never coincidentally equal the task's handle, and as
        this is the only condition we are interested in it does not matter if
        pxMutexHolder is accessed simultaneously by another task.  Therefore no
        mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
            the task handle, therefore no underflow check is required.  Also,
            uxRecursiveCallCount is only modified by the mutex holder, and as
            there can only be one, no mutual exclusion is required to modify the
            uxRecursiveCallCount member. */
            ( pxMutex->u.uxRecursiveCallCount )--;

            /* Have we unwound the call count? */
            if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
            holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
        xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
        {
            ( pxMutex->u.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

            /* pdPASS will only be returned if the mutex was successfully
            obtained.  The calling task may have entered the Blocked state
            before reaching here. */
            if( xReturn == pdPASS )
            {
                ( pxMutex->u.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
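
/* Illustrative sketch (not part of this file): a recursive mutex must be
given once for every successful take, so nested calls balance out:

    SemaphoreHandle_t xRecMutex = xSemaphoreCreateRecursiveMutex();

    ( void ) xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY );   // count = 1
    ( void ) xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY );   // count = 2
    ( void ) xSemaphoreGiveRecursive( xRecMutex );                  // count = 1, still held
    ( void ) xSemaphoreGiveRecursive( xRecMutex );                  // count = 0, mutex released
*/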
#if ( configUSE_COUNTING_SEMAPHORES == 1 )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        configASSERT( xHandle );
        return xHandle;
    }

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/
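
/* Illustrative sketch (not part of this file): a counting semaphore created
here simply reports uxInitialCount items as already waiting, e.g. a resource
pool of three items, all initially available:

    SemaphoreHandle_t xPool = xSemaphoreCreateCounting( 3, 3 );

    if( xSemaphoreTake( xPool, portMAX_DELAY ) == pdPASS )
    {
        // One of the three resources is now owned by this task.
    }
*/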
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */

    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
            highest priority task wanting to access the queue.  If the head item
            in the queue is to be overwritten then it does not matter if the
            queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );
                xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                            {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            /* Return to the original privilege level before exiting the
            function. */
            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    }
}
/*-----------------------------------------------------------*/
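
/* Illustrative sketch (not part of this file): the public send macros all
funnel into xQueueGenericSend(), differing only in xCopyPosition:

    xQueueSend( xQueue, &xItem, xTicks );           // queueSEND_TO_BACK
    xQueueSendToFront( xQueue, &xItem, xTicks );    // queueSEND_TO_FRONT
    xQueueOverwrite( xQueue, &xItem );              // queueOVERWRITE, length 1 queues only
*/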
#if ( configUSE_ALTERNATIVE_API == 1 )

    BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
    {
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

        for( ;; )
        {
            taskENTER_CRITICAL();
            {
                /* Is there room on the queue now?  To be running we must be
                the highest priority task wanting to access the queue. */
                if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
                {
                    traceQUEUE_SEND( pxQueue );
                    prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately. */
                            portYIELD_WITHIN_API();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    taskEXIT_CRITICAL();
                    return pdPASS;
                }
                else
                {
                    if( xTicksToWait == ( TickType_t ) 0 )
                    {
                        taskEXIT_CRITICAL();
                        return errQUEUE_FULL;
                    }
                    else if( xEntryTimeSet == pdFALSE )
                    {
                        vTaskSetTimeOutState( &xTimeOut );
                        xEntryTimeSet = pdTRUE;
                    }
                }
            }
            taskEXIT_CRITICAL();

            taskENTER_CRITICAL();
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    if( prvIsQueueFull( pxQueue ) != pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
                        portYIELD_WITHIN_API();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    taskEXIT_CRITICAL();
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
            }
            taskEXIT_CRITICAL();
        }
    }

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/
#if ( configUSE_ALTERNATIVE_API == 1 )

    BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
    {
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    int8_t *pcOriginalReadPosition;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

        for( ;; )
        {
            taskENTER_CRITICAL();
            {
                if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
                {
                    /* Remember our read position in case we are just peeking. */
                    pcOriginalReadPosition = pxQueue->u.pcReadFrom;

                    prvCopyDataFromQueue( pxQueue, pvBuffer );

                    if( xJustPeeking == pdFALSE )
                    {
                        traceQUEUE_RECEIVE( pxQueue );

                        /* Data is actually being removed (not just peeked). */
                        --( pxQueue->uxMessagesWaiting );

                        #if ( configUSE_MUTEXES == 1 )
                        {
                            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                            {
                                /* Record the information required to implement
                                priority inheritance should it become necessary. */
                                pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #endif

                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                            {
                                portYIELD_WITHIN_API();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                    }
                    else
                    {
                        traceQUEUE_PEEK( pxQueue );

                        /* The data is not being removed, so reset our read
                        pointer. */
                        pxQueue->u.pcReadFrom = pcOriginalReadPosition;

                        /* The data is being left in the queue, so see if there are
                        any other tasks waiting for the data. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            /* Tasks that are removed from the event list will get added to
                            the pending ready list as the scheduler is still suspended. */
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority than this task. */
                                portYIELD_WITHIN_API();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    taskEXIT_CRITICAL();
                    return pdPASS;
                }
                else
                {
                    if( xTicksToWait == ( TickType_t ) 0 )
                    {
                        taskEXIT_CRITICAL();
                        traceQUEUE_RECEIVE_FAILED( pxQueue );
                        return errQUEUE_EMPTY;
                    }
                    else if( xEntryTimeSet == pdFALSE )
                    {
                        vTaskSetTimeOutState( &xTimeOut );
                        xEntryTimeSet = pdTRUE;
                    }
                }
            }
            taskEXIT_CRITICAL();

            taskENTER_CRITICAL();
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                        #if ( configUSE_MUTEXES == 1 )
                        {
                            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                            {
                                taskENTER_CRITICAL();
                                {
                                    vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                                }
                                taskEXIT_CRITICAL();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #endif

                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                        portYIELD_WITHIN_API();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
            }
            taskEXIT_CRITICAL();
        }
    }

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
    in the queue.  Also don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post?). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
            semaphore or mutex.  That means prvCopyDataToQueue() cannot result
            in a task disinheriting a priority and prvCopyDataToQueue() can be
            called here even though the disinherit function does not check if
            the scheduler is suspended before accessing the ready lists. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( pxQueue->xTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                ++( pxQueue->xTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
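
/* Illustrative sketch (not part of this file): the canonical ISR pattern -
post, then request a context switch only if a higher priority task was woken.
xEventQueue is assumed to be a queue created elsewhere by the application,
and portYIELD_FROM_ISR() is the port specific yield macro:

    void vExampleInterruptHandler( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    const uint32_t ulEvent = 1UL;   // Hypothetical payload.

        ( void ) xQueueSendFromISR( xEventQueue, &ulEvent, &xHigherPriorityTaskWoken );

        // Performs the context switch only if one was requested above.
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/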
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
    item size is 0.  Don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post?). */

    configASSERT( pxQueue );

    /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
    if the item size is not 0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Normally a mutex would not be given from an interrupt, especially if
    there is a mutex holder, as priority inheritance makes no sense for an
    interrupt, only for tasks. */
    configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* When the queue is used to implement a semaphore no data is ever
        moved through the queue but it is still valid to see if the queue 'has
        space'. */
        if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
        {
            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
            holder - and if there is a mutex holder then the mutex cannot be
            given from an ISR.  As this is the ISR version of the function it
            can be assumed there is no mutex holder and no need to determine if
            priority disinheritance is needed.  Simply increase the count of
            messages (semaphores) available. */
            ++( pxQueue->uxMessagesWaiting );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( pxQueue->xTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
                        {
                            /* The semaphore is a member of a queue set, and
                            posting to the queue set caused a higher priority
                            task to unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                ++( pxQueue->xTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
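
/* Illustrative sketch (not part of this file): giving a semaphore from an
ISR through the xSemaphoreGiveFromISR() macro, which resolves to
xQueueGiveFromISR().  xBinarySemaphore is assumed to have been created
elsewhere by the application:

    void vTimerISR( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        ( void ) xSemaphoreGiveFromISR( xBinarySemaphore, &xHigherPriorityTaskWoken );
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/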
BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */

    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Remember the read position in case the queue is only being
                peeked. */
                pcOriginalReadPosition = pxQueue->u.pcReadFrom;

                prvCopyDataFromQueue( pxQueue, pvBuffer );

                if( xJustPeeking == pdFALSE )
                {
                    traceQUEUE_RECEIVE( pxQueue );

                    /* Actually removing data, not just peeking. */
                    --( pxQueue->uxMessagesWaiting );

                    #if ( configUSE_MUTEXES == 1 )
                    {
                        if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                        {
                            /* Record the information required to implement
                            priority inheritance should it become necessary. */
                            pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* configUSE_MUTEXES */

                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                        {
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    traceQUEUE_PEEK( pxQueue );

                    /* The data is not being removed, so reset the read
                    pointer. */
                    pxQueue->u.pcReadFrom = pcOriginalReadPosition;

                    /* The data is being left in the queue, so see if there are
                    any other tasks waiting for the data. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority than this task. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                #if ( configUSE_MUTEXES == 1 )
                {
                    if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                    {
                        taskENTER_CRITICAL();
                        {
                            vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                        }
                        taskEXIT_CRITICAL();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif

                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();
            traceQUEUE_RECEIVE_FAILED( pxQueue );
            return errQUEUE_EMPTY;
        }
    }
}
/*-----------------------------------------------------------*/
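
/* Illustrative sketch (not part of this file): xQueueReceive() and
xQueuePeek() are thin macros over this function, differing only in the
xJustPeeking argument:

    uint32_t ulValue;

    // Look at the next item without removing it...
    if( xQueuePeek( xQueue, &ulValue, 0 ) == pdPASS )
    {
        // ...then actually remove it.
        ( void ) xQueueReceive( xQueue, &ulValue, 0 );
    }
*/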
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

            prvCopyDataFromQueue( pxQueue, pvBuffer );
            --( pxQueue->uxMessagesWaiting );

            /* If the queue is locked the event list will not be modified.
            Instead update the lock count so the task that unlocks the queue
            will know that an ISR has removed data while the queue was
            locked. */
            if( pxQueue->xRxLock == queueUNLOCKED )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority than us so
                        force a context switch. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was removed while it was locked. */
                ++( pxQueue->xRxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
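
/* Illustrative sketch (not part of this file): draining a queue from an ISR,
for example feeding a transmit register.  xTxQueue and the hardware access
are assumptions for illustration only:

    void vUARTTxISR( void )
    {
    BaseType_t xTaskWoken = pdFALSE;
    char cByteToSend;

        while( xQueueReceiveFromISR( xTxQueue, &cByteToSend, &xTaskWoken ) == pdPASS )
        {
            // Write cByteToSend to the hypothetical transmit hardware here.
        }

        portYIELD_FROM_ISR( xTaskWoken );
    }
*/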
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_PEEK_FROM_ISR( pxQueue );

            /* Remember the read position so it can be reset as nothing is
            actually being removed from the queue. */
            pcOriginalReadPosition = pxQueue->u.pcReadFrom;
            prvCopyDataFromQueue( pxQueue, pvBuffer );
            pxQueue->u.pcReadFrom = pcOriginalReadPosition;

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t *pxQueue;

    pxQueue = ( Queue_t * ) xQueue;
    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/
void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    traceQUEUE_DELETE( pxQueue );
    #if ( configQUEUE_REGISTRY_SIZE > 0 )
    {
        vQueueUnregisterQueue( pxQueue );
    }
    #endif
    vPortFree( pxQueue );
}
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
    {
        ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->ucQueueType;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;

	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
				pxQueue->pxMutexHolder = NULL;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize;
		if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		if( xPosition == queueOVERWRITE )
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--( pxQueue->uxMessagesWaiting );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

	++( pxQueue->uxMessagesWaiting );

	return xReturn;
}
/*-----------------------------------------------------------*/

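/*
 * Illustrative sketch of the queueOVERWRITE path above, not part of the
 * original file: xQueueOverwrite() is intended for queues with a length of
 * one, and succeeds even when the queue is full by replacing the held item.
 *
 *	QueueHandle_t xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );
 *	uint32_t ulVar = 10;
 *
 *	( void ) xQueueOverwrite( xMailbox, &ulVar );	queue now holds 10
 *	ulVar = 20;
 *	( void ) xQueueOverwrite( xMailbox, &ulVar );	the 10 is replaced by 20
 */
/*-----------------------------------------------------------*/
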
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
	if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
	{
		pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
	}
}
/*-----------------------------------------------------------*/

static void prvUnlockQueue( Queue_t * const pxQueue )
{
	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

	/* The lock counts contain the number of extra data items placed or
	removed from the queue while the queue was locked.  When a queue is
	locked items can be added or removed, but the event lists cannot be
	updated. */
	taskENTER_CRITICAL();
	{
		/* See if data was added to the queue while it was locked. */
		while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
		{
			/* Data was posted while the queue was locked.  Are any tasks
			blocked waiting for data to become available? */
			#if ( configUSE_QUEUE_SETS == 1 )
			{
				if( pxQueue->pxQueueSetContainer != NULL )
				{
					if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
					{
						/* The queue is a member of a queue set, and posting to
						the queue set caused a higher priority task to unblock.
						A context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* Tasks that are removed from the event list will get added to
					the pending ready list as the scheduler is still suspended. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							vTaskMissedYield();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						break;
					}
				}
			}
			#else /* configUSE_QUEUE_SETS */
			{
				/* Tasks that are removed from the event list will get added to
				the pending ready list as the scheduler is still suspended. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority so record that a
						context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					break;
				}
			}
			#endif /* configUSE_QUEUE_SETS */

			--( pxQueue->xTxLock );
		}

		pxQueue->xTxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();

	/* Do the same for the Rx lock. */
	taskENTER_CRITICAL();
	{
		while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
		{
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					vTaskMissedYield();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				--( pxQueue->xRxLock );
			}
			else
			{
				break;
			}
		}

		pxQueue->xRxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

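/*
 * Illustrative ISR-side sketch, not part of the original file: checking the
 * queue state before posting from an interrupt.  xRxQueue, vRxISR() and
 * ucReadByteFromPeripheral() are hypothetical names, and
 * portYIELD_FROM_ISR() is only available on ports that provide it.
 *
 *	void vRxISR( void )
 *	{
 *	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *	uint8_t ucIn = ucReadByteFromPeripheral();
 *
 *		if( xQueueIsQueueFullFromISR( xRxQueue ) == pdFALSE )
 *		{
 *			( void ) xQueueSendFromISR( xRxQueue, &ucIn, &xHigherPriorityTaskWoken );
 *		}
 *
 *		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *	}
 */
/*-----------------------------------------------------------*/
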
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a coroutine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

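/*
 * Illustrative co-routine sketch, not part of the original file:
 * application code does not normally call xQueueCRSend() directly, but via
 * the crQUEUE_SEND() macro in croutine.h, which handles the errQUEUE_BLOCKED
 * and errQUEUE_YIELD return values on the caller's behalf.  xCoRoutineQueue
 * is a hypothetical queue handle.  Note that variables which must survive a
 * blocking call within a co-routine have to be declared static.
 *
 *	static void vSenderCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
 *	{
 *	static UBaseType_t uxValueToPost = 0;
 *	BaseType_t xResult;
 *
 *		crSTART( xHandle );
 *
 *		for( ;; )
 *		{
 *			crQUEUE_SEND( xHandle, xCoRoutineQueue, &uxValueToPost, ( TickType_t ) 10, &xResult );
 *			uxValueToPost++;
 *		}
 *
 *		crEND();
 *	}
 */
/*-----------------------------------------------------------*/
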
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available.  If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

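/*
 * Illustrative usage sketch, not part of the original file: the registry
 * exists only so a kernel aware debugger can associate a human readable
 * name with a queue or semaphore handle.
 *
 *	QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );
 *	vQueueAddToRegistry( xQueue, "RxQueue" );
 *
 * Call vQueueUnregisterQueue( xQueue ) to free the registry slot again
 * before the queue itself is deleted.
 */
/*-----------------------------------------------------------*/
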
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered is actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot is free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configUSE_TIMERS == 1 )

	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xReturn = pdFAIL;
			}
			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xReturn = pdFAIL;
			}
			else
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

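/*
 * Illustrative queue set sketch, not part of the original file, following
 * the pattern documented on FreeRTOS.org: a task blocks on two queues at
 * once by adding both to a set sized for their combined length, then reads
 * whichever member handle the set returns.  configUSE_QUEUE_SETS must be 1.
 *
 *	QueueSetHandle_t xSet;
 *	QueueHandle_t xQueue1, xQueue2;
 *	QueueSetMemberHandle_t xActivated;
 *	uint32_t ulReceived;
 *
 *	xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
 *	xQueue2 = xQueueCreate( 10, sizeof( uint32_t ) );
 *	xSet = xQueueCreateSet( 10 + 10 );
 *
 *	( void ) xQueueAddToSet( xQueue1, xSet );
 *	( void ) xQueueAddToSet( xQueue2, xSet );
 *
 *	for( ;; )
 *	{
 *		xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );
 *
 *		if( ( xActivated == ( QueueSetMemberHandle_t ) xQueue1 ) ||
 *			( xActivated == ( QueueSetMemberHandle_t ) xQueue2 ) )
 *		{
 *			( void ) xQueueReceive( ( QueueHandle_t ) xActivated, &ulReceived, 0 );
 *		}
 *	}
 */
/*-----------------------------------------------------------*/
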
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called from a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );

			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( pxQueueSetContainer->xTxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority. */
						xReturn = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				( pxQueueSetContainer->xTxLock )++;
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */