/*
    FreeRTOS V8.2.0 - Copyright (C) 2015 Real Time Engineers Ltd.
    All rights reserved

    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

    This file is part of the FreeRTOS distribution.

    FreeRTOS is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License (version 2) as published by the
    Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.

    ***************************************************************************
    >>!   NOTE: The modification to the GPL is included to allow you to     !<<
    >>!   distribute a combined work that includes FreeRTOS without being   !<<
    >>!   obliged to provide the source code for proprietary components     !<<
    >>!   outside of the FreeRTOS kernel.                                   !<<
    ***************************************************************************

    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE.  Full license text is available on the following
    link: http://www.freertos.org/a00114.html

    ***************************************************************************
     *    FreeRTOS provides completely free yet professionally developed,    *
     *    robust, strictly quality controlled, supported, and cross          *
     *    platform software that is more than just the market leader, it     *
     *    is the industry's de facto standard.                               *
     *                                                                       *
     *    Help yourself get started quickly while simultaneously helping     *
     *    to support the FreeRTOS project by purchasing a FreeRTOS           *
     *    tutorial book, reference manual, or both:                          *
     *    http://www.FreeRTOS.org/Documentation                              *
    ***************************************************************************

    http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
    the FAQ page "My application does not run, what could be wrong?".  Have you
    defined configASSERT()?

    http://www.FreeRTOS.org/support - In return for receiving this top quality
    embedded software for free we request you assist our global community by
    participating in the support forum.

    http://www.FreeRTOS.org/training - Investing in training allows your team to
    be as productive as possible as early as possible.  Now you can receive
    FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
    Ltd, and the world's leading authority on the world's leading RTOS.

    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
    compatible FAT file system, and our tiny thread aware UDP/IP stack.

    http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
    Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.

    http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
    Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
    licenses offer ticketed support, indemnification and commercial middleware.

    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
    engineered and independently SIL3 certified version for use in safety and
    mission critical applications that require provable dependability.
*/
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED                    ( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED           ( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder                    pcTail
#define uxQueueType                      pcHead
#define queueQUEUE_IS_MUTEX              NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME       ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
    int8_t *pcHead;                 /*< Points to the beginning of the queue storage area. */
    int8_t *pcTail;                 /*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this is used as a marker. */
    int8_t *pcWriteTo;              /*< Points to the next free place in the storage area. */

    union                           /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
    {
        int8_t *pcReadFrom;         /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
        UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
    } u;

    List_t xTasksWaitingToSend;     /*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;  /*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
    UBaseType_t uxLength;           /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;         /*< The size of each item that the queue will hold. */

    volatile BaseType_t xRxLock;    /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile BaseType_t xTxLock;    /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition *pxQueueSetContainer;
    #endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to the
    new QueueRegistryItem_t name below to enable the use of older kernel aware
    debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
    /*
     * Checks to see if a queue is a member of a queue set, and if so, notifies
     * the queue set that the queue contains data.
     */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                             \
    taskENTER_CRITICAL();                                   \
    {                                                       \
        if( ( pxQueue )->xRxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
        if( ( pxQueue )->xTxLock == queueUNLOCKED )         \
        {                                                   \
            ( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;  \
        }                                                   \
    }                                                       \
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
        pxQueue->xRxLock = queueUNLOCKED;
        pxQueue->xTxLock = queueUNLOCKED;

        if( xNewQueue == pdFALSE )
        {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                {
                    queueYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Ensure the event queues start in the correct state. */
            vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
/*-----------------------------------------------------------*/
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
QueueHandle_t xReturn = NULL;
int8_t *pcAllocatedBuffer;

    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* There is not going to be a queue storage area. */
        xQueueSizeInBytes = ( size_t ) 0;
    }
    else
    {
        /* The queue is one byte longer than asked for to make wrap checking
        easier/faster. */
        xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
    }

    /* Allocate the new queue structure and storage area. */
    pcAllocatedBuffer = ( int8_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

    if( pcAllocatedBuffer != NULL )
    {
        pxNewQueue = ( Queue_t * ) pcAllocatedBuffer; /*lint !e826 MISRA The buffer cannot be too small because it was dimensioned by sizeof( Queue_t ) + xQueueSizeInBytes. */

        if( uxItemSize == ( UBaseType_t ) 0 )
        {
            /* No RAM was allocated for the queue storage area, but pcHead
            cannot be set to NULL because NULL is used as a key to say the queue
            is used as a mutex.  Therefore just set pcHead to point to the queue
            as a benign value that is known to be within the memory map. */
            pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
        }
        else
        {
            /* Jump past the queue structure to find the location of the queue
            storage area - adding the padding bytes to get a better alignment. */
            pxNewQueue->pcHead = pcAllocatedBuffer + sizeof( Queue_t );
        }

        /* Initialise the queue members as described above where the queue type
        is defined. */
        pxNewQueue->uxLength = uxQueueLength;
        pxNewQueue->uxItemSize = uxItemSize;
        ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

        #if ( configUSE_TRACE_FACILITY == 1 )
        {
            pxNewQueue->ucQueueType = ucQueueType;
        }
        #endif /* configUSE_TRACE_FACILITY */

        #if( configUSE_QUEUE_SETS == 1 )
        {
            pxNewQueue->pxQueueSetContainer = NULL;
        }
        #endif /* configUSE_QUEUE_SETS */

        traceQUEUE_CREATE( pxNewQueue );
        xReturn = pxNewQueue;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    configASSERT( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
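/*
 * Illustrative usage sketch (not part of the kernel source): application code
 * normally reaches xQueueGenericCreate() through the xQueueCreate() macro
 * defined in queue.h.  The message type and names used here are hypothetical.
 *
 *  typedef struct
 *  {
 *      uint32_t ulSensorID;
 *      uint32_t ulReading;
 *  } ExampleMessage_t;
 *
 *  QueueHandle_t xExampleQueue;
 *
 *  void vExampleSetup( void )
 *  {
 *      // Space for 10 items is allocated from the FreeRTOS heap along with
 *      // the Queue_t structure itself; items are copied in by value.
 *      xExampleQueue = xQueueCreate( 10, sizeof( ExampleMessage_t ) );
 *      configASSERT( xExampleQueue != NULL );
 *  }
 */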
#if ( configUSE_MUTEXES == 1 )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        /* Allocate the new queue structure. */
        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
        if( pxNewQueue != NULL )
        {
            /* Information required for priority inheritance. */
            pxNewQueue->pxMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* When a queue is used as a mutex no data is actually copied into
            or out of the queue. */
            pxNewQueue->pcWriteTo = NULL;
            pxNewQueue->u.pcReadFrom = NULL;

            /* Each mutex has a length of 1 (like a binary semaphore) and
            an item size of 0 as nothing is actually copied into or out
            of the mutex. */
            pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
            pxNewQueue->uxLength = ( UBaseType_t ) 1U;
            pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
            pxNewQueue->xRxLock = queueUNLOCKED;
            pxNewQueue->xTxLock = queueUNLOCKED;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                pxNewQueue->ucQueueType = ucQueueType;
            }
            #endif

            #if ( configUSE_QUEUE_SETS == 1 )
            {
                pxNewQueue->pxQueueSetContainer = NULL;
            }
            #endif

            /* Ensure the event queues start with the correct state. */
            vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }

        configASSERT( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
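/*
 * Illustrative usage sketch (not part of the kernel source): xQueueCreateMutex()
 * is normally reached through the xSemaphoreCreateMutex() macro in semphr.h,
 * and the mutex is then taken and given with xSemaphoreTake()/xSemaphoreGive().
 * The resource and task names are hypothetical.
 *
 *  SemaphoreHandle_t xResourceMutex;
 *
 *  void vExampleInit( void )
 *  {
 *      xResourceMutex = xSemaphoreCreateMutex();
 *  }
 *
 *  void vExampleWorkerTask( void *pvParameters )
 *  {
 *      for( ;; )
 *      {
 *          // Block for up to 100 ticks waiting for the mutex; priority
 *          // inheritance is applied to the holder while we wait.
 *          if( xSemaphoreTake( xResourceMutex, ( TickType_t ) 100 ) == pdTRUE )
 *          {
 *              // Access the shared resource here.
 *              xSemaphoreGive( xResourceMutex );
 *          }
 *      }
 *  }
 */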
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then pxMutexHolder will not
        change outside of this task.  If this task does not hold the mutex then
        pxMutexHolder can never coincidentally equal the tasks handle, and as
        this is the only condition we are interested in it does not matter if
        pxMutexHolder is accessed simultaneously by another task.  Therefore no
        mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
            the task handle, therefore no underflow check is required.  Also,
            uxRecursiveCallCount is only modified by the mutex holder, and as
            there can only be one, no mutual exclusion is required to modify the
            uxRecursiveCallCount member. */
            ( pxMutex->u.uxRecursiveCallCount )--;

            /* Have we unwound the call count? */
            if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
            holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
        xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
        {
            ( pxMutex->u.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

            /* pdPASS will only be returned if the mutex was successfully
            obtained.  The calling task may have entered the Blocked state
            before reaching here. */
            if( xReturn == pdPASS )
            {
                ( pxMutex->u.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
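/*
 * Illustrative usage sketch (not part of the kernel source): recursive mutexes
 * are created with xSemaphoreCreateRecursiveMutex() and must be given exactly
 * as many times as they were taken before another task can obtain them.  The
 * names below are hypothetical.
 *
 *  SemaphoreHandle_t xRecursiveMutex;
 *
 *  void vNestedCall( void )
 *  {
 *      // The holder may take the same mutex again without blocking; this
 *      // just increments u.uxRecursiveCallCount.
 *      xSemaphoreTakeRecursive( xRecursiveMutex, portMAX_DELAY );
 *      // ... protected work ...
 *      xSemaphoreGiveRecursive( xRecursiveMutex );
 *  }
 *
 *  void vOuterCall( void )
 *  {
 *      xSemaphoreTakeRecursive( xRecursiveMutex, portMAX_DELAY );
 *      vNestedCall();
 *      // Only this final give (count back to zero) releases the mutex.
 *      xSemaphoreGiveRecursive( xRecursiveMutex );
 *  }
 */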
#if ( configUSE_COUNTING_SEMAPHORES == 1 )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        configASSERT( xHandle );

        return xHandle;
    }

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/
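/*
 * Illustrative usage sketch (not part of the kernel source): counting
 * semaphores are normally created through the xSemaphoreCreateCounting() macro
 * in semphr.h.  Here a semaphore counts free slots in a hypothetical pool of
 * 5 buffers.
 *
 *  SemaphoreHandle_t xBufferPoolSemaphore;
 *
 *  void vExamplePoolInit( void )
 *  {
 *      // Maximum count of 5, initial count of 5 (all buffers free).
 *      xBufferPoolSemaphore = xSemaphoreCreateCounting( 5, 5 );
 *  }
 *
 *  void vExampleClientTask( void *pvParameters )
 *  {
 *      if( xSemaphoreTake( xBufferPoolSemaphore, portMAX_DELAY ) == pdTRUE )
 *      {
 *          // A buffer is available - use it, then return it to the pool.
 *          xSemaphoreGive( xBufferPoolSemaphore );
 *      }
 *  }
 */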
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be
            the highest priority task wanting to access the queue.  If
            the head item in the queue is to be overwritten then it does
            not matter if the queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );
                xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                            {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            /* Return to the original privilege level before exiting the
            function. */
            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    }
}
/*-----------------------------------------------------------*/
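/*
 * Illustrative usage sketch (not part of the kernel source): tasks normally
 * call xQueueGenericSend() through the xQueueSend(), xQueueSendToBack(),
 * xQueueSendToFront() or xQueueOverwrite() macros.  The queue and message
 * names are hypothetical and assume the queue created in the earlier sketch.
 *
 *  void vExampleProducerTask( void *pvParameters )
 *  {
 *  ExampleMessage_t xMessage = { 0 };
 *
 *      for( ;; )
 *      {
 *          xMessage.ulReading++;
 *
 *          // Copy the message to the back of the queue, blocking for up to
 *          // 10 ticks if the queue is full.
 *          if( xQueueSend( xExampleQueue, &xMessage, ( TickType_t ) 10 ) != pdPASS )
 *          {
 *              // errQUEUE_FULL: the queue remained full for 10 ticks.
 *          }
 *      }
 *  }
 */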
#if ( configUSE_ALTERNATIVE_API == 1 )

    BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
    {
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

        for( ;; )
        {
            taskENTER_CRITICAL();
            {
                /* Is there room on the queue now?  To be running we must be
                the highest priority task wanting to access the queue. */
                if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
                {
                    traceQUEUE_SEND( pxQueue );
                    prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately. */
                            portYIELD_WITHIN_API();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    taskEXIT_CRITICAL();
                    return pdPASS;
                }
                else
                {
                    if( xTicksToWait == ( TickType_t ) 0 )
                    {
                        taskEXIT_CRITICAL();
                        return errQUEUE_FULL;
                    }
                    else if( xEntryTimeSet == pdFALSE )
                    {
                        vTaskSetTimeOutState( &xTimeOut );
                        xEntryTimeSet = pdTRUE;
                    }
                }
            }
            taskEXIT_CRITICAL();

            taskENTER_CRITICAL();
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    if( prvIsQueueFull( pxQueue ) != pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
                        portYIELD_WITHIN_API();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    taskEXIT_CRITICAL();
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
            }
            taskEXIT_CRITICAL();
        }
    }

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/
#if ( configUSE_ALTERNATIVE_API == 1 )

    BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
    {
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    int8_t *pcOriginalReadPosition;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

        for( ;; )
        {
            taskENTER_CRITICAL();
            {
                if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
                {
                    /* Remember our read position in case we are just peeking. */
                    pcOriginalReadPosition = pxQueue->u.pcReadFrom;

                    prvCopyDataFromQueue( pxQueue, pvBuffer );

                    if( xJustPeeking == pdFALSE )
                    {
                        traceQUEUE_RECEIVE( pxQueue );

                        /* Data is actually being removed (not just peeked). */
                        --( pxQueue->uxMessagesWaiting );

                        #if ( configUSE_MUTEXES == 1 )
                        {
                            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                            {
                                /* Record the information required to implement
                                priority inheritance should it become necessary. */
                                pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #endif

                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                            {
                                portYIELD_WITHIN_API();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                    }
                    else
                    {
                        traceQUEUE_PEEK( pxQueue );

                        /* The data is not being removed, so reset our read
                        pointer. */
                        pxQueue->u.pcReadFrom = pcOriginalReadPosition;

                        /* The data is being left in the queue, so see if there are
                        any other tasks waiting for the data. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            /* Tasks that are removed from the event list will get added to
                            the pending ready list as the scheduler is still suspended. */
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority than this task. */
                                portYIELD_WITHIN_API();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    taskEXIT_CRITICAL();
                    return pdPASS;
                }
                else
                {
                    if( xTicksToWait == ( TickType_t ) 0 )
                    {
                        taskEXIT_CRITICAL();
                        traceQUEUE_RECEIVE_FAILED( pxQueue );
                        return errQUEUE_EMPTY;
                    }
                    else if( xEntryTimeSet == pdFALSE )
                    {
                        vTaskSetTimeOutState( &xTimeOut );
                        xEntryTimeSet = pdTRUE;
                    }
                }
            }
            taskEXIT_CRITICAL();

            taskENTER_CRITICAL();
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                        #if ( configUSE_MUTEXES == 1 )
                        {
                            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                            {
                                taskENTER_CRITICAL();
                                {
                                    vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                                }
                                taskEXIT_CRITICAL();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #endif

                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                        portYIELD_WITHIN_API();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
            }
            taskEXIT_CRITICAL();
        }
    }

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
    in the queue.  Also don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
            holder - and if there is a mutex holder then the mutex cannot be
            given from an ISR.  Therefore, unlike the xQueueGenericGive()
            function, there is no need to determine the need for priority
            disinheritance here or to clear the mutex holder TCB member. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( pxQueue->xTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                ++( pxQueue->xTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
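/*
 * Illustrative usage sketch (not part of the kernel source): an interrupt
 * handler posts to a queue with xQueueSendFromISR() (a macro wrapper around
 * xQueueGenericSendFromISR()) and requests a context switch on exit if a
 * higher priority task was woken.  The handler and queue names are
 * hypothetical; the exact yield macro spelling varies by port.
 *
 *  void vExampleUartISR( void )
 *  {
 *  BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *  ExampleMessage_t xMessage;
 *
 *      // Fill xMessage from the peripheral, then post it without blocking.
 *      xQueueSendFromISR( xExampleQueue, &xMessage, &xHigherPriorityTaskWoken );
 *
 *      // If a task of higher priority than the interrupted task was
 *      // unblocked, request a context switch before the ISR returns.
 *      portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *  }
 */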
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    /* xQueueGenericSendFromISR() should be used if the item size is not 0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
    item size is 0.  Don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* When the queue is used to implement a semaphore no data is ever
        moved through the queue but it is still valid to see if the queue 'has
        space'. */
        if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
        {
            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
            holder - and if there is a mutex holder then the mutex cannot be
            given from an ISR.  Therefore, unlike the xQueueGenericGive()
            function, there is no need to determine the need for priority
            disinheritance here or to clear the mutex holder TCB member. */

            ++( pxQueue->uxMessagesWaiting );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( pxQueue->xTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
                        {
                            /* The semaphore is a member of a queue set, and
                            posting to the queue set caused a higher priority
                            task to unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                ++( pxQueue->xTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
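/*
 * Illustrative usage sketch (not part of the kernel source): xQueueGiveFromISR()
 * is normally reached through the xSemaphoreGiveFromISR() macro and is used to
 * signal a task from an interrupt via a binary or counting semaphore (item
 * size 0).  Names are hypothetical; the exact yield macro spelling varies by
 * port.
 *
 *  SemaphoreHandle_t xDataReadySemaphore;
 *
 *  void vExampleDmaCompleteISR( void )
 *  {
 *  BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *      // No data is copied - the semaphore count is simply incremented.
 *      xSemaphoreGiveFromISR( xDataReadySemaphore, &xHigherPriorityTaskWoken );
 *      portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *  }
 */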
BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Remember the read position in case the queue is only being
                peeked. */
                pcOriginalReadPosition = pxQueue->u.pcReadFrom;

                prvCopyDataFromQueue( pxQueue, pvBuffer );

                if( xJustPeeking == pdFALSE )
                {
                    traceQUEUE_RECEIVE( pxQueue );

                    /* Actually removing data, not just peeking. */
                    --( pxQueue->uxMessagesWaiting );

                    #if ( configUSE_MUTEXES == 1 )
                    {
                        if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                        {
                            /* Record the information required to implement
                            priority inheritance should it become necessary. */
                            pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* configUSE_MUTEXES */

                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                        {
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    traceQUEUE_PEEK( pxQueue );

                    /* The data is not being removed, so reset the read
                    pointer. */
                    pxQueue->u.pcReadFrom = pcOriginalReadPosition;

                    /* The data is being left in the queue, so see if there are
                    any other tasks waiting for the data. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        /* Tasks that are removed from the event list will get added to
                        the pending ready list as the scheduler is still suspended. */
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority than this task. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                #if ( configUSE_MUTEXES == 1 )
                {
                    if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                    {
                        taskENTER_CRITICAL();
                        {
                            vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                        }
                        taskEXIT_CRITICAL();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif

                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();
            traceQUEUE_RECEIVE_FAILED( pxQueue );
            return errQUEUE_EMPTY;
        }
    }
}
/*-----------------------------------------------------------*/
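/*
 * Illustrative usage sketch (not part of the kernel source): tasks normally
 * call xQueueGenericReceive() through the xQueueReceive() macro (xJustPeeking
 * set to pdFALSE) or xQueuePeek() (xJustPeeking set to pdTRUE).  Names are
 * hypothetical and assume the queue created in the earlier sketch.
 *
 *  void vExampleConsumerTask( void *pvParameters )
 *  {
 *  ExampleMessage_t xReceivedMessage;
 *
 *      for( ;; )
 *      {
 *          // Block until an item arrives, then remove it from the queue by
 *          // copying it into xReceivedMessage.
 *          if( xQueueReceive( xExampleQueue, &xReceivedMessage, portMAX_DELAY ) == pdPASS )
 *          {
 *              // Process xReceivedMessage here.
 *          }
 *      }
 *  }
 */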
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

            prvCopyDataFromQueue( pxQueue, pvBuffer );
            --( pxQueue->uxMessagesWaiting );

            /* If the queue is locked the event list will not be modified.
            Instead update the lock count so the task that unlocks the queue
            will know that an ISR has removed data while the queue was
            locked. */
            if( pxQueue->xRxLock == queueUNLOCKED )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority than us so
                        force a context switch. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was removed while it was locked. */
                ++( pxQueue->xRxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
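/*
 * Illustrative usage sketch (not part of the kernel source): an interrupt can
 * drain items with xQueueReceiveFromISR(), which never blocks.  Names are
 * hypothetical; the exact yield macro spelling varies by port.
 *
 *  void vExampleTxReadyISR( void )
 *  {
 *  BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *  ExampleMessage_t xMessage;
 *
 *      // Take the next item if one is available; pdFAIL means the queue
 *      // was empty.
 *      if( xQueueReceiveFromISR( xExampleQueue, &xMessage, &xHigherPriorityTaskWoken ) == pdPASS )
 *      {
 *          // Write xMessage to the peripheral.
 *      }
 *
 *      portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *  }
 */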
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_PEEK_FROM_ISR( pxQueue );

            /* Remember the read position so it can be reset as nothing is
            actually being removed from the queue. */
            pcOriginalReadPosition = pxQueue->u.pcReadFrom;
            prvCopyDataFromQueue( pxQueue, pvBuffer );
            pxQueue->u.pcReadFrom = pcOriginalReadPosition;

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t *pxQueue;

    pxQueue = ( Queue_t * ) xQueue;
    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    traceQUEUE_DELETE( pxQueue );
    #if ( configQUEUE_REGISTRY_SIZE > 0 )
    {
        vQueueUnregisterQueue( pxQueue );
    }
    #endif
    vPortFree( pxQueue );
}
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
    {
        ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->ucQueueType;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;

	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
				pxQueue->pxMutexHolder = NULL;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize;
		if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		if( xPosition == queueOVERWRITE )
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--( pxQueue->uxMessagesWaiting );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

	++( pxQueue->uxMessagesWaiting );

	return xReturn;
}
/*-----------------------------------------------------------*/
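
/*
 * Illustrative usage sketch (not part of the kernel source): the queueOVERWRITE
 * copy position handled above is what the xQueueOverwrite() macro in queue.h
 * passes down, giving length-one queues "mailbox" behaviour.  The names below
 * are hypothetical.
 *
 *	void vMailboxExample( void )
 *	{
 *	QueueHandle_t xMailbox;
 *	uint32_t ulNewValue = 25UL, ulReadValue;
 *
 *		// xQueueOverwrite() is intended for queues with a length of one.
 *		xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );
 *
 *		// Write to the mailbox.  If it already held a value the value is
 *		// replaced rather than a second item being added, which is why the
 *		// code above decrements uxMessagesWaiting before it is re-incremented.
 *		( void ) xQueueOverwrite( xMailbox, &ulNewValue );
 *
 *		// Read the latest value without removing it from the mailbox.
 *		( void ) xQueuePeek( xMailbox, &ulReadValue, 0 );
 *	}
 */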
\r
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
	if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
	{
		pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
	}
}
/*-----------------------------------------------------------*/
\r
static void prvUnlockQueue( Queue_t * const pxQueue )
{
	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

	/* The lock counts contain the number of extra data items placed or
	removed from the queue while the queue was locked.  When a queue is
	locked items can be added or removed, but the event lists cannot be
	updated. */
	taskENTER_CRITICAL();
	{
		/* See if data was added to the queue while it was locked. */
		while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
		{
			/* Data was posted while the queue was locked.  Are any tasks
			blocked waiting for data to become available? */
			#if ( configUSE_QUEUE_SETS == 1 )
			{
				if( pxQueue->pxQueueSetContainer != NULL )
				{
					if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
					{
						/* The queue is a member of a queue set, and posting to
						the queue set caused a higher priority task to unblock.
						A context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* Tasks that are removed from the event list will get added to
					the pending ready list as the scheduler is still suspended. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							vTaskMissedYield();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						break;
					}
				}
			}
			#else /* configUSE_QUEUE_SETS */
			{
				/* Tasks that are removed from the event list will get added to
				the pending ready list as the scheduler is still suspended. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority so record that a
						context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					break;
				}
			}
			#endif /* configUSE_QUEUE_SETS */

			--( pxQueue->xTxLock );
		}

		pxQueue->xTxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();

	/* Do the same for the Rx lock. */
	taskENTER_CRITICAL();
	{
		while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
		{
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					vTaskMissedYield();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				--( pxQueue->xRxLock );
			}
			else
			{
				break;
			}
		}

		pxQueue->xRxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/
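
/*
 * For reference only: elsewhere in this file the lock/unlock pair is used in
 * the pattern sketched below (see xQueueGenericSend() and
 * xQueueGenericReceive()), so prvUnlockQueue() always executes with the
 * scheduler suspended, as the comment above requires.
 *
 *	vTaskSuspendAll();
 *	prvLockQueue( pxQueue );
 *
 *	// Timeout checks and event list manipulation happen here with interrupts
 *	// enabled; ISRs can still add or remove items, they just cannot update
 *	// the event lists while the queue is locked.
 *
 *	prvUnlockQueue( pxQueue );
 *	( void ) xTaskResumeAll();
 */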
\r
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/
\r
BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/
\r
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/
\r
BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/
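
/*
 * Illustrative usage sketch (not part of the kernel source): an interrupt
 * handler can use xQueueIsQueueFullFromISR() before posting, although checking
 * the return value of xQueueSendFromISR() achieves the same end.  xQueue,
 * ulReadHardwareRegister() and the handler name are hypothetical, and
 * portYIELD_FROM_ISR() is port specific (some ports name it
 * portEND_SWITCHING_ISR()).
 *
 *	void vAnInterruptHandler( void )
 *	{
 *	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *	uint32_t ulValueToPost = ulReadHardwareRegister();
 *
 *		if( xQueueIsQueueFullFromISR( xQueue ) == pdFALSE )
 *		{
 *			( void ) xQueueSendFromISR( xQueue, &ulValueToPost, &xHigherPriorityTaskWoken );
 *		}
 *
 *		// Request a context switch if posting unblocked a higher priority task.
 *		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *	}
 */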
\r
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a coroutine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
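
/*
 * Illustrative usage sketch (not part of the kernel source): application code
 * does not normally call xQueueCRSend() directly, but uses the crQUEUE_SEND()
 * macro from croutine.h inside a co-routine.  The co-routine and queue names
 * below are hypothetical; note co-routine local data must be static because
 * the stack is not maintained across blocking calls.
 *
 *	void vProducerCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
 *	{
 *	static uint8_t ucValueToPost = 0;
 *	static BaseType_t xResult;
 *
 *		// Co-routines must begin with a call to crSTART().
 *		crSTART( xHandle );
 *
 *		for( ;; )
 *		{
 *			// Post to xCoRoutineQueue (a hypothetical file scope handle),
 *			// blocking for up to 10 ticks if the queue is full.
 *			crQUEUE_SEND( xHandle, xCoRoutineQueue, &ucValueToPost, 10, &xResult );
 *			ucValueToPost++;
 *		}
 *
 *		// Co-routines must end with a call to crEND().
 *		crEND();
 *	}
 */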
\r
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
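
/*
 * Illustrative usage sketch (not part of the kernel source): the receiving
 * side of the hypothetical co-routine pair above, using the crQUEUE_RECEIVE()
 * macro from croutine.h rather than calling xQueueCRReceive() directly.
 *
 *	void vConsumerCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
 *	{
 *	static uint8_t ucReceivedValue;
 *	static BaseType_t xResult;
 *
 *		crSTART( xHandle );
 *
 *		for( ;; )
 *		{
 *			// Wait up to 100 ticks for data from the hypothetical queue.
 *			crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &ucReceivedValue, 100, &xResult );
 *
 *			if( xResult == pdPASS )
 *			{
 *				// Process ucReceivedValue here.
 *			}
 *		}
 *
 *		crEND();
 *	}
 */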
\r
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
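
/*
 * Illustrative usage sketch (not part of the kernel source): interrupts use
 * the crQUEUE_SEND_FROM_ISR() macro from croutine.h, which wraps the function
 * above.  The handler name, xCoRoutineQueue, xUartDataAvailable() and
 * ucUartReadChar() are hypothetical.
 *
 *	void vUartRxISR( void )
 *	{
 *	BaseType_t xCoRoutineWoken = pdFALSE;
 *	uint8_t ucRxedChar;
 *
 *		// Drain the hypothetical hardware FIFO into the queue.  The macro
 *		// returns pdTRUE once a co-routine has been woken so only one
 *		// co-routine is woken per interrupt, mirroring the check above.
 *		while( xUartDataAvailable() != pdFALSE )
 *		{
 *			ucRxedChar = ucUartReadChar();
 *			xCoRoutineWoken = crQUEUE_SEND_FROM_ISR( xCoRoutineQueue, &ucRxedChar, xCoRoutineWoken );
 *		}
 *	}
 */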
\r
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available. If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
\r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
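
/*
 * Illustrative usage sketch (not part of the kernel source): the registry only
 * needs to be populated when a kernel aware debugger or trace tool is used to
 * identify queues by name; configQUEUE_REGISTRY_SIZE sets how many entries can
 * be stored.  The function, handle and queue names below are hypothetical.
 *
 *	void vCreateNamedQueues( void )
 *	{
 *	QueueHandle_t xRxQueue, xTxQueue;
 *
 *		xRxQueue = xQueueCreate( 8, sizeof( uint8_t ) );
 *		xTxQueue = xQueueCreate( 8, sizeof( uint8_t ) );
 *
 *		// The name strings are not copied (only the pointer is stored above),
 *		// so they must remain valid for as long as the queues are registered -
 *		// string literals are fine.
 *		vQueueAddToRegistry( xRxQueue, "UART Rx" );
 *		vQueueAddToRegistry( xTxQueue, "UART Tx" );
 *	}
 */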
\r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered is actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot is free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_TIMERS == 1 )

	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xReturn = pdFAIL;
			}
			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xReturn = pdFAIL;
			}
			else
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
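
/*
 * Illustrative sketch of the queue set workflow implemented above (not part of
 * the kernel source), assuming configUSE_QUEUE_SETS is 1: create the set large
 * enough to hold one event per space in all of its members, add the members
 * while they are empty, then block on the set and read from whichever member
 * handle is returned.  The task and handle names below are hypothetical.
 *
 *	void vQueueSetReaderTask( void *pvParameters )
 *	{
 *	QueueHandle_t xQueue1, xQueue2;
 *	QueueSetHandle_t xQueueSet;
 *	QueueSetMemberHandle_t xActivatedMember;
 *	uint32_t ulReceived;
 *
 *		( void ) pvParameters;
 *
 *		xQueue1 = xQueueCreate( 5, sizeof( uint32_t ) );
 *		xQueue2 = xQueueCreate( 5, sizeof( uint32_t ) );
 *
 *		// The set must be able to hold one event per space in its members.
 *		xQueueSet = xQueueCreateSet( 5 + 5 );
 *
 *		// Members must be empty when they are added, per the checks above.
 *		( void ) xQueueAddToSet( xQueue1, xQueueSet );
 *		( void ) xQueueAddToSet( xQueue2, xQueueSet );
 *
 *		for( ;; )
 *		{
 *			// Block until one of the member queues contains data.
 *			xActivatedMember = xQueueSelectFromSet( xQueueSet, portMAX_DELAY );
 *
 *			if( xActivatedMember == ( QueueSetMemberHandle_t ) xQueue1 )
 *			{
 *				( void ) xQueueReceive( xQueue1, &ulReceived, 0 );
 *			}
 *			else if( xActivatedMember == ( QueueSetMemberHandle_t ) xQueue2 )
 *			{
 *				( void ) xQueueReceive( xQueue2, &ulReceived, 0 );
 *			}
 *		}
 *	}
 */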
\r
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
\r
#if ( configUSE_QUEUE_SETS == 1 )

	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called from a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );
			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
				{
					/* The task waiting has a higher priority. */
					xReturn = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */