/*
    FreeRTOS V8.2.3 - Copyright (C) 2015 Real Time Engineers Ltd.
    All rights reserved

    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

    This file is part of the FreeRTOS distribution.

    FreeRTOS is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License (version 2) as published by the
    Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.

    ***************************************************************************
    >>!   NOTE: The modification to the GPL is included to allow you to     !<<
    >>!   distribute a combined work that includes FreeRTOS without being   !<<
    >>!   obliged to provide the source code for proprietary components     !<<
    >>!   outside of the FreeRTOS kernel.                                   !<<
    ***************************************************************************

    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE.  Full license text is available on the following
    link: http://www.freertos.org/a00114.html

    ***************************************************************************
     *                                                                       *
     *    FreeRTOS provides completely free yet professionally developed,    *
     *    robust, strictly quality controlled, supported, and cross          *
     *    platform software that is more than just the market leader, it     *
     *    is the industry's de facto standard.                               *
     *                                                                       *
     *    Help yourself get started quickly while simultaneously helping     *
     *    to support the FreeRTOS project by purchasing a FreeRTOS           *
     *    tutorial book, reference manual, or both:                          *
     *    http://www.FreeRTOS.org/Documentation                              *
     *                                                                       *
    ***************************************************************************

    http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
    the FAQ page "My application does not run, what could be wrong?".  Have you
    defined configASSERT()?

    http://www.FreeRTOS.org/support - In return for receiving this top quality
    embedded software for free we request you assist our global community by
    participating in the support forum.

    http://www.FreeRTOS.org/training - Investing in training allows your team
    to be as productive as possible as early as possible.  Now you can receive
    FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
    Ltd, and the world's leading authority on the world's leading RTOS.

    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
    compatible FAT file system, and our tiny thread aware UDP/IP stack.

    http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
    Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.

    http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
    Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
    licenses offer ticketed support, indemnification and commercial middleware.

    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
    engineered and independently SIL3 certified version for use in safety and
    mission critical applications that require provable dependability.
*/

#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */

/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED				( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED		( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder				pcTail
#define uxQueueType					pcHead
#define queueQUEUE_IS_MUTEX			NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH	( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME			( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
    int8_t *pcHead;					/*< Points to the beginning of the queue storage area. */
    int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
    int8_t *pcWriteTo;				/*< Points to the next free place in the storage area. */

    union							/* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
    {
        int8_t *pcReadFrom;			/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
        UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
    } u;

    List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
    UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;			/*< The size of each item that the queue will hold. */

    volatile BaseType_t xRxLock;	/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile BaseType_t xTxLock;	/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition *pxQueueSetContainer;
    #endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to
    the new QueueRegistryItem_t name below to enable the use of older kernel
    aware debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
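
/* Example use of the registry from application code - a minimal sketch.  The
queue length, item type and the name "MainQueue" are illustrative, and
configQUEUE_REGISTRY_SIZE must be greater than 0 in FreeRTOSConfig.h for
vQueueAddToRegistry() to be available:

    QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );

    if( xQueue != NULL )
    {
        // Associate a human readable name with the queue so kernel aware
        // debuggers can display it.
        vQueueAddToRegistry( xQueue, "MainQueue" );
    }
*/
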
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
    /*
     * Checks to see if a queue is a member of a queue set, and if so, notifies
     * the queue set that the queue contains data.
     */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )								\
    taskENTER_CRITICAL();									\
    {														\
        if( ( pxQueue )->xRxLock == queueUNLOCKED )			\
        {													\
            ( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;	\
        }													\
        if( ( pxQueue )->xTxLock == queueUNLOCKED )			\
        {													\
            ( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;	\
        }													\
    }														\
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
        pxQueue->xRxLock = queueUNLOCKED;
        pxQueue->xTxLock = queueUNLOCKED;

        if( xNewQueue == pdFALSE )
        {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                {
                    queueYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Ensure the event queues start in the correct state. */
            vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
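
/* Example usage - a minimal sketch.  Application code normally reaches this
function through the xQueueReset() macro in queue.h, which passes pdFALSE as
xNewQueue; xQueue is an illustrative handle to a queue created elsewhere:

    // Empty the queue.  Because xNewQueue is pdFALSE the event lists are
    // left intact and one task blocked waiting to send, if any, is unblocked.
    ( void ) xQueueReset( xQueue );
*/
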
/*-----------------------------------------------------------*/

QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
QueueHandle_t xReturn = NULL;

    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* There is not going to be a queue storage area. */
        xQueueSizeInBytes = ( size_t ) 0;
    }
    else
    {
        /* The queue is one byte longer than asked for to make wrap checking
        easier/faster. */
        xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
    }

    /* Allocate the new queue structure and storage area. */
    pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

    if( pxNewQueue != NULL )
    {
        if( uxItemSize == ( UBaseType_t ) 0 )
        {
            /* No RAM was allocated for the queue storage area, but pcHead
            cannot be set to NULL because NULL is used as a key to say the queue
            is used as a mutex.  Therefore just set pcHead to point to the queue
            as a benign value that is known to be within the memory map. */
            pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
        }
        else
        {
            /* Jump past the queue structure to find the location of the queue
            storage area. */
            pxNewQueue->pcHead = ( ( int8_t * ) pxNewQueue ) + sizeof( Queue_t );
        }

        /* Initialise the queue members as described above where the queue type
        is defined. */
        pxNewQueue->uxLength = uxQueueLength;
        pxNewQueue->uxItemSize = uxItemSize;
        ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

        #if ( configUSE_TRACE_FACILITY == 1 )
        {
            pxNewQueue->ucQueueType = ucQueueType;
        }
        #endif /* configUSE_TRACE_FACILITY */

        #if( configUSE_QUEUE_SETS == 1 )
        {
            pxNewQueue->pxQueueSetContainer = NULL;
        }
        #endif /* configUSE_QUEUE_SETS */

        traceQUEUE_CREATE( pxNewQueue );
        xReturn = pxNewQueue;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    configASSERT( xReturn );

    return xReturn;
}
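
/* Example usage - a minimal sketch.  Application code normally reaches this
function through the xQueueCreate() macro in queue.h; the message type and
queue length are illustrative:

    typedef struct
    {
        uint8_t ucMessageID;
        uint32_t ulData;
    } Message_t;

    // Create a queue capable of holding 10 Message_t structures.
    QueueHandle_t xQueue = xQueueCreate( 10, sizeof( Message_t ) );

    if( xQueue == NULL )
    {
        // The queue could not be created because pvPortMalloc() could not
        // allocate the queue structure plus storage area.
    }
*/
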
/*-----------------------------------------------------------*/

#if ( configUSE_MUTEXES == 1 )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        /* Allocate the new queue structure. */
        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
        if( pxNewQueue != NULL )
        {
            /* Information required for priority inheritance. */
            pxNewQueue->pxMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* When a queue is used as a mutex no data is actually copied into
            or out of the queue. */
            pxNewQueue->pcWriteTo = NULL;
            pxNewQueue->u.pcReadFrom = NULL;

            /* Each mutex has a length of 1 (like a binary semaphore) and
            an item size of 0 as nothing is actually copied into or out
            of the mutex. */
            pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
            pxNewQueue->uxLength = ( UBaseType_t ) 1U;
            pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
            pxNewQueue->xRxLock = queueUNLOCKED;
            pxNewQueue->xTxLock = queueUNLOCKED;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                pxNewQueue->ucQueueType = ucQueueType;
            }
            #endif

            #if ( configUSE_QUEUE_SETS == 1 )
            {
                pxNewQueue->pxQueueSetContainer = NULL;
            }
            #endif

            /* Ensure the event queues start with the correct state. */
            vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
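
/* Example usage - a minimal sketch.  Application code normally reaches this
function through the xSemaphoreCreateMutex() macro in semphr.h; xMutex and
the 10 tick block time are illustrative:

    SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();

    if( xMutex != NULL )
    {
        // Block for up to 10 ticks waiting to become the mutex holder.
        if( xSemaphoreTake( xMutex, ( TickType_t ) 10 ) == pdTRUE )
        {
            // Access the resource protected by the mutex here, then give
            // the mutex back so other tasks can take it.
            ( void ) xSemaphoreGive( xMutex );
        }
    }
*/
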
/*-----------------------------------------------------------*/

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then pxMutexHolder will not
        change outside of this task.  If this task does not hold the mutex then
        pxMutexHolder can never coincidentally equal the task's handle, and as
        this is the only condition we are interested in it does not matter if
        pxMutexHolder is accessed simultaneously by another task.  Therefore no
        mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
            the task handle, therefore no underflow check is required.  Also,
            uxRecursiveCallCount is only modified by the mutex holder, and as
            there can only be one, no mutual exclusion is required to modify the
            uxRecursiveCallCount member. */
            ( pxMutex->u.uxRecursiveCallCount )--;

            /* Have we unwound the call count? */
            if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
            holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
        xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
        {
            ( pxMutex->u.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

            /* pdPASS will only be returned if the mutex was successfully
            obtained.  The calling task may have entered the Blocked state
            before reaching here. */
            if( xReturn == pdPASS )
            {
                ( pxMutex->u.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
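
/* Example usage of the recursive give/take pair - a minimal sketch reached
through the xSemaphoreCreateRecursiveMutex(), xSemaphoreTakeRecursive() and
xSemaphoreGiveRecursive() macros in semphr.h; xRecursiveMutex is illustrative:

    SemaphoreHandle_t xRecursiveMutex = xSemaphoreCreateRecursiveMutex();

    // The holding task can take the mutex more than once...
    ( void ) xSemaphoreTakeRecursive( xRecursiveMutex, portMAX_DELAY );
    ( void ) xSemaphoreTakeRecursive( xRecursiveMutex, portMAX_DELAY );

    // ...but must give it back the same number of times before the mutex
    // becomes available to other tasks.
    ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
    ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
*/
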
/*-----------------------------------------------------------*/

#if ( configUSE_COUNTING_SEMAPHORES == 1 )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        configASSERT( xHandle );

        return xHandle;
    }

#endif /* configUSE_COUNTING_SEMAPHORES */
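
/* Example usage - a minimal sketch reached through the
xSemaphoreCreateCounting() macro in semphr.h; the counts are illustrative:

    // Create a semaphore that can count up to 10 and starts at 0 - a
    // typical configuration when counting events that are 'given' by an
    // ISR and 'taken' by a task.
    SemaphoreHandle_t xCountingSemaphore = xSemaphoreCreateCounting( 10, 0 );
*/
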
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */

    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
            highest priority task wanting to access the queue.  If the head item
            in the queue is to be overwritten then it does not matter if the
            queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );
                xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                            {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            /* Return to the original privilege level before exiting the
            function. */
            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    }
}
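
/* Example usage - a minimal sketch.  Application code normally reaches this
function through the xQueueSend(), xQueueSendToBack(), xQueueSendToFront()
and xQueueOverwrite() macros in queue.h; xQueue and ulValueToSend are
illustrative:

    uint32_t ulValueToSend = 123UL;

    // Block for up to 100 ticks if the queue is already full.
    if( xQueueSend( xQueue, &ulValueToSend, ( TickType_t ) 100 ) != pdPASS )
    {
        // errQUEUE_FULL was returned - the queue remained full for the
        // entire 100 tick block time.
    }
*/
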
/*-----------------------------------------------------------*/

#if ( configUSE_ALTERNATIVE_API == 1 )

    BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
    {
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

        for( ;; )
        {
            taskENTER_CRITICAL();
            {
                /* Is there room on the queue now?  To be running we must be
                the highest priority task wanting to access the queue. */
                if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
                {
                    traceQUEUE_SEND( pxQueue );
                    prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately. */
                            portYIELD_WITHIN_API();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    taskEXIT_CRITICAL();

                    return pdPASS;
                }
                else
                {
                    if( xTicksToWait == ( TickType_t ) 0 )
                    {
                        taskEXIT_CRITICAL();
                        return errQUEUE_FULL;
                    }
                    else if( xEntryTimeSet == pdFALSE )
                    {
                        vTaskSetTimeOutState( &xTimeOut );
                        xEntryTimeSet = pdTRUE;
                    }
                }
            }
            taskEXIT_CRITICAL();

            taskENTER_CRITICAL();
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    if( prvIsQueueFull( pxQueue ) != pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
                        portYIELD_WITHIN_API();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    taskEXIT_CRITICAL();
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
            }
            taskEXIT_CRITICAL();
        }
    }

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

#if ( configUSE_ALTERNATIVE_API == 1 )

    BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
    {
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    int8_t *pcOriginalReadPosition;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

        for( ;; )
        {
            taskENTER_CRITICAL();
            {
                if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
                {
                    /* Remember our read position in case we are just peeking. */
                    pcOriginalReadPosition = pxQueue->u.pcReadFrom;

                    prvCopyDataFromQueue( pxQueue, pvBuffer );

                    if( xJustPeeking == pdFALSE )
                    {
                        traceQUEUE_RECEIVE( pxQueue );

                        /* Data is actually being removed (not just peeked). */
                        --( pxQueue->uxMessagesWaiting );

                        #if ( configUSE_MUTEXES == 1 )
                        {
                            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                            {
                                /* Record the information required to implement
                                priority inheritance should it become necessary. */
                                pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #endif

                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                            {
                                portYIELD_WITHIN_API();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                    }
                    else
                    {
                        traceQUEUE_PEEK( pxQueue );

                        /* The data is not being removed, so reset our read
                        pointer. */
                        pxQueue->u.pcReadFrom = pcOriginalReadPosition;

                        /* The data is being left in the queue, so see if there are
                        any other tasks waiting for the data. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            /* Tasks that are removed from the event list will get added to
                            the pending ready list as the scheduler is still suspended. */
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority than this task. */
                                portYIELD_WITHIN_API();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    taskEXIT_CRITICAL();

                    return pdPASS;
                }
                else
                {
                    if( xTicksToWait == ( TickType_t ) 0 )
                    {
                        taskEXIT_CRITICAL();
                        traceQUEUE_RECEIVE_FAILED( pxQueue );
                        return errQUEUE_EMPTY;
                    }
                    else if( xEntryTimeSet == pdFALSE )
                    {
                        vTaskSetTimeOutState( &xTimeOut );
                        xEntryTimeSet = pdTRUE;
                    }
                }
            }
            taskEXIT_CRITICAL();

            taskENTER_CRITICAL();
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                        #if ( configUSE_MUTEXES == 1 )
                        {
                            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                            {
                                taskENTER_CRITICAL();
                                {
                                    vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                                }
                                taskEXIT_CRITICAL();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #endif

                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                        portYIELD_WITHIN_API();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
            }
            taskEXIT_CRITICAL();
        }
    }

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
    in the queue.  Also don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
            semaphore or mutex.  That means prvCopyDataToQueue() cannot result
            in a task disinheriting a priority and prvCopyDataToQueue() can be
            called here even though the disinherit function does not check if
            the scheduler is suspended before accessing the ready lists. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( pxQueue->xTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                ++( pxQueue->xTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
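
/* Example usage - a minimal sketch reached through the xQueueSendFromISR()
macro in queue.h.  The ISR name, xQueue and ulReadDataFromPeripheral() are
illustrative, and the final yield macro is port specific (portYIELD_FROM_ISR()
or portEND_SWITCHING_ISR() depending on the port):

    void vAnExampleISR( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    uint32_t ulReceived = ulReadDataFromPeripheral();

        ( void ) xQueueSendFromISR( xQueue, &ulReceived, &xHigherPriorityTaskWoken );

        // If posting unblocked a task with a priority above that of the
        // interrupted task, request a context switch before exiting.
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/
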
/*-----------------------------------------------------------*/

BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
    item size is 0.  Don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */

    configASSERT( pxQueue );

    /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
    if the item size is not 0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Normally a mutex would not be given from an interrupt, especially if
    there is a mutex holder, as priority inheritance makes no sense for
    interrupts, only tasks. */
    configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* When the queue is used to implement a semaphore no data is ever
        moved through the queue but it is still valid to see if the queue 'has
        space'. */
        if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
        {
            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
            holder - and if there is a mutex holder then the mutex cannot be
            given from an ISR.  As this is the ISR version of the function it
            can be assumed there is no mutex holder and no need to determine if
            priority disinheritance is needed.  Simply increase the count of
            messages (semaphores) available. */
            ++( pxQueue->uxMessagesWaiting );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( pxQueue->xTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
                        {
                            /* The semaphore is a member of a queue set, and
                            posting to the queue set caused a higher priority
                            task to unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                ++( pxQueue->xTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
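
/* Example usage - a minimal sketch reached through the
xSemaphoreGiveFromISR() macro in semphr.h, typical of deferring interrupt
processing to a handler task.  xBinarySemaphore (created elsewhere with
xSemaphoreCreateBinary()) and the ISR name are illustrative, and the final
yield macro is port specific:

    void vTimerISR( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        // 'Give' the semaphore to unblock the handler task.
        ( void ) xSemaphoreGiveFromISR( xBinarySemaphore, &xHigherPriorityTaskWoken );

        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
*/
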
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */

    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Remember the read position in case the queue is only being
                peeked. */
                pcOriginalReadPosition = pxQueue->u.pcReadFrom;

                prvCopyDataFromQueue( pxQueue, pvBuffer );

                if( xJustPeeking == pdFALSE )
                {
                    traceQUEUE_RECEIVE( pxQueue );

                    /* Actually removing data, not just peeking. */
                    --( pxQueue->uxMessagesWaiting );

                    #if ( configUSE_MUTEXES == 1 )
                    {
                        if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                        {
                            /* Record the information required to implement
                            priority inheritance should it become necessary. */
                            pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* configUSE_MUTEXES */

                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                        {
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    traceQUEUE_PEEK( pxQueue );

                    /* The data is not being removed, so reset the read
                    pointer. */
                    pxQueue->u.pcReadFrom = pcOriginalReadPosition;

                    /* The data is being left in the queue, so see if there are
                    any other tasks waiting for the data. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority than this task. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }

                taskEXIT_CRITICAL();

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                #if ( configUSE_MUTEXES == 1 )
                {
                    if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                    {
                        taskENTER_CRITICAL();
                        {
                            vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                        }
                        taskEXIT_CRITICAL();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif

                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();
            traceQUEUE_RECEIVE_FAILED( pxQueue );
            return errQUEUE_EMPTY;
        }
    }
}
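
/* Example usage - a minimal sketch.  Application code normally reaches this
function through the xQueueReceive() and xQueuePeek() macros in queue.h,
which pass pdFALSE and pdTRUE respectively as xJustPeeking; xQueue and
ulReceived are illustrative:

    uint32_t ulReceived;

    // Look at the next item without removing it, blocking for up to 10 ticks.
    if( xQueuePeek( xQueue, &ulReceived, ( TickType_t ) 10 ) == pdPASS )
    {
        // An item was available within 10 ticks.  Receiving with a zero
        // block time removes it (assuming no other reader drained the
        // queue in between).
        ( void ) xQueueReceive( xQueue, &ulReceived, ( TickType_t ) 0 );
    }
*/
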
/*-----------------------------------------------------------*/

BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

            prvCopyDataFromQueue( pxQueue, pvBuffer );
            --( pxQueue->uxMessagesWaiting );

            /* If the queue is locked the event list will not be modified.
            Instead update the lock count so the task that unlocks the queue
            will know that an ISR has removed data while the queue was
            locked. */
            if( pxQueue->xRxLock == queueUNLOCKED )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority than us so
                        force a context switch. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was removed while it was locked. */
                ++( pxQueue->xRxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
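
/* Example usage - a minimal sketch; the ISR name, xQueue and the char item
type are illustrative, and the final yield macro is port specific:

    void vAnotherExampleISR( void )
    {
    BaseType_t xTaskWoken = pdFALSE;
    char cRxedChar;

        // Drain any characters a task posted before this interrupt fired.
        while( xQueueReceiveFromISR( xQueue, &cRxedChar, &xTaskWoken ) == pdPASS )
        {
            // Process cRxedChar here.
        }

        portYIELD_FROM_ISR( xTaskWoken );
    }
*/
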
/*-----------------------------------------------------------*/

BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_PEEK_FROM_ISR( pxQueue );

            /* Remember the read position so it can be reset as nothing is
            actually being removed from the queue. */
            pcOriginalReadPosition = pxQueue->u.pcReadFrom;
            prvCopyDataFromQueue( pxQueue, pvBuffer );
            pxQueue->u.pcReadFrom = pcOriginalReadPosition;

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t *pxQueue;

    pxQueue = ( Queue_t * ) xQueue;
    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    traceQUEUE_DELETE( pxQueue );
    #if ( configQUEUE_REGISTRY_SIZE > 0 )
    {
        vQueueUnregisterQueue( pxQueue );
    }
    #endif
    vPortFree( pxQueue );
}
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
    {
        ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->ucQueueType;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;

	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
				pxQueue->pxMutexHolder = NULL;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize;
		if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		if( xPosition == queueOVERWRITE )
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--( pxQueue->uxMessagesWaiting );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

	++( pxQueue->uxMessagesWaiting );

	return xReturn;
}
/*-----------------------------------------------------------*/
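/* Worked illustration of the wrap-around arithmetic above (hypothetical
numbers, not kernel code).  For a queue created with uxLength = 3 and
uxItemSize = 4 the storage area holds 12 bytes, with pcHead at offset 0 and
pcTail at offset 12.  Each send-to-back advances pcWriteTo by 4; after the
write at offset 8 the pointer reaches offset 12, the >= pcTail test fires,
and pcWriteTo wraps back to pcHead.  A send-to-front instead moves
u.pcReadFrom backwards by 4, wrapping it to ( pcTail - uxItemSize ), i.e.
offset 8, when it falls below pcHead. */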
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
	if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
	{
		pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
	}
}
/*-----------------------------------------------------------*/
static void prvUnlockQueue( Queue_t * const pxQueue )
{
	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

	/* The lock counts contain the number of extra data items placed or
	removed from the queue while the queue was locked.  When a queue is
	locked items can be added or removed, but the event lists cannot be
	updated. */
	taskENTER_CRITICAL();
	{
		/* See if data was added to the queue while it was locked. */
		while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
		{
			/* Data was posted while the queue was locked.  Are any tasks
			blocked waiting for data to become available? */
			#if ( configUSE_QUEUE_SETS == 1 )
			{
				if( pxQueue->pxQueueSetContainer != NULL )
				{
					if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
					{
						/* The queue is a member of a queue set, and posting to
						the queue set caused a higher priority task to unblock.
						A context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* Tasks that are removed from the event list will get added to
					the pending ready list as the scheduler is still suspended. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							vTaskMissedYield();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						break;
					}
				}
			}
			#else /* configUSE_QUEUE_SETS */
			{
				/* Tasks that are removed from the event list will get added to
				the pending ready list as the scheduler is still suspended. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority so record that a
						context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					break;
				}
			}
			#endif /* configUSE_QUEUE_SETS */

			--( pxQueue->xTxLock );
		}

		pxQueue->xTxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();

	/* Do the same for the Rx lock. */
	taskENTER_CRITICAL();
	{
		while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
		{
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					vTaskMissedYield();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				--( pxQueue->xRxLock );
			}
			else
			{
				break;
			}
		}

		pxQueue->xRxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/
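/* Caller pattern sketch (illustrative, mirroring how this file uses the
lock pair rather than documenting new API).  The locks are only ever taken
with the scheduler suspended:

	vTaskSuspendAll();
	prvLockQueue( pxQueue );

		while the queue is locked an ISR can still copy items in or out,
		but it only increments the Tx/Rx lock counts instead of touching
		the event lists; the loops above then replay those counts

	prvUnlockQueue( pxQueue );
	( void ) xTaskResumeAll();
*/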
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/
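/* Usage sketch (illustrative only; xRxQueue, vUartRxISR() and
ucReadRxByte() are hypothetical, and portYIELD_FROM_ISR() is assumed to be
provided by the port in use).  The FromISR variants let an interrupt check
queue state before attempting a send:

	void vUartRxISR( void )
	{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
	char cByte;

		cByte = ( char ) ucReadRxByte();

		if( xQueueIsQueueFullFromISR( xRxQueue ) == pdFALSE )
		{
			( void ) xQueueSendFromISR( xRxQueue, &cByte, &xHigherPriorityTaskWoken );
		}

		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}

The check is optional belt-and-braces - xQueueSendFromISR() itself returns
errQUEUE_FULL if the queue has no space. */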
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a coroutine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
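/* Usage sketch (illustrative only; xCoRoutineQueue and vTxCoRoutine are
hypothetical).  Application code does not normally call xQueueCRSend()
directly - it uses the crQUEUE_SEND() macro from croutine.h, which turns
the errQUEUE_BLOCKED return into a co-routine yield.  Note co-routine
locals must be static because the stack is not preserved across a yield:

	void vTxCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
	{
	static UBaseType_t uxValueToPost = 0;
	static BaseType_t xResult;

		crSTART( xHandle );
		( void ) uxIndex;

		for( ;; )
		{
			crQUEUE_SEND( xHandle, xCoRoutineQueue, &uxValueToPost, 10, &xResult );

			if( xResult == pdPASS )
			{
				uxValueToPost++;
			}
		}

		crEND();
	}
*/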
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
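/* The receiving side mirrors the sketch above (again illustrative only):
a co-routine calls the crQUEUE_RECEIVE() macro from croutine.h rather than
xQueueCRReceive() itself, for example

	crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &uxReceivedValue, 10, &xResult );

where uxReceivedValue and xResult are static variables so their values
survive the yield implied by the macro. */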
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available. If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
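/* Usage sketch (illustrative only; xCommandQueue and Command_t are
hypothetical).  The registry exists purely to give kernel aware debuggers a
textual name for a queue or semaphore, so registration typically happens
once, immediately after creation:

	xCommandQueue = xQueueCreate( 10, sizeof( Command_t ) );

	if( xCommandQueue != NULL )
	{
		vQueueAddToRegistry( xCommandQueue, "CmdQ" );
	}

Note the registry stores the name pointer rather than copying the string,
so the name must remain valid - a string literal is the usual choice. */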
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered is actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot is free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
#if ( configUSE_TIMERS == 1 )

	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xReturn = pdFAIL;
			}
			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xReturn = pdFAIL;
			}
			else
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
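/* End to end usage sketch (illustrative only; all names are hypothetical,
semphr.h is assumed to be included, and configUSE_QUEUE_SETS must be 1).
A queue set multiplexes blocking on several members; the set must be sized
for the combined length of its members, and members must be empty when they
are added:

	QueueHandle_t xDataQueue = xQueueCreate( 5, sizeof( uint32_t ) );
	SemaphoreHandle_t xWakeSemaphore = xSemaphoreCreateBinary();
	QueueSetHandle_t xSet = xQueueCreateSet( 5 + 1 );
	QueueSetMemberHandle_t xActivated;
	uint32_t ulValue;

	( void ) xQueueAddToSet( xDataQueue, xSet );
	( void ) xQueueAddToSet( xWakeSemaphore, xSet );

	for( ;; )
	{
		xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );

		if( xActivated == ( QueueSetMemberHandle_t ) xDataQueue )
		{
			( void ) xQueueReceive( xDataQueue, &ulValue, 0 );
		}
		else if( xActivated == ( QueueSetMemberHandle_t ) xWakeSemaphore )
		{
			( void ) xSemaphoreTake( xWakeSemaphore, 0 );
		}
	}

The zero block times are safe because xQueueSelectFromSet() only returns a
member that already holds data. */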
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called from a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );

			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( pxQueueSetContainer->xTxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority. */
						xReturn = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				( pxQueueSetContainer->xTxLock )++;
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */