/*
    FreeRTOS V8.2.0rc1 - Copyright (C) 2014 Real Time Engineers Ltd.

    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

    This file is part of the FreeRTOS distribution.

    FreeRTOS is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License (version 2) as published by the
    Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.

    >>! NOTE: The modification to the GPL is included to allow you to     !<<
    >>! distribute a combined work that includes FreeRTOS without being   !<<
    >>! obliged to provide the source code for proprietary components     !<<
    >>! outside of the FreeRTOS kernel.                                   !<<

    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE.  Full license text is available on the following
    link: http://www.freertos.org/a00114.html

    ***************************************************************************
    *                                                                         *
    *    Having a problem?  Start by reading the FAQ "My application does     *
    *    not run, what could be wrong?".  Have you defined configASSERT()?    *
    *                                                                         *
    *    http://www.FreeRTOS.org/FAQHelp.html                                 *
    *                                                                         *
    ***************************************************************************

    ***************************************************************************
    *                                                                         *
    *    FreeRTOS provides completely free yet professionally developed,     *
    *    robust, strictly quality controlled, supported, and cross           *
    *    platform software that is more than just the market leader, it     *
    *    is the industry's de facto standard.                                *
    *                                                                         *
    *    Help yourself get started quickly while simultaneously helping      *
    *    to support the FreeRTOS project by purchasing a FreeRTOS            *
    *    tutorial book, reference manual, or both:                           *
    *    http://www.FreeRTOS.org/Documentation                               *
    *                                                                         *
    ***************************************************************************

    ***************************************************************************
    *                                                                         *
    *    Investing in training allows your team to be as productive as       *
    *    possible as early as possible, lowering your overall development    *
    *    cost, and enabling you to bring a more robust product to market     *
    *    earlier than would otherwise be possible.  Richard Barry is both    *
    *    the architect and key author of FreeRTOS, and so also the world's   *
    *    leading authority on what is the world's most popular real time     *
    *    kernel for deeply embedded MCU designs.  Obtaining your training    *
    *    from Richard ensures your team will gain directly from his in-depth *
    *    product knowledge and years of usage experience.  Contact Real Time *
    *    Engineers Ltd to enquire about the FreeRTOS Masterclass, presented  *
    *    by Richard Barry: http://www.FreeRTOS.org/contact                   *
    *                                                                         *
    ***************************************************************************

    ***************************************************************************
    *                                                                         *
    *    You are receiving this top quality software for free.  Please play  *
    *    fair and reciprocate by reporting any suspected issues and          *
    *    participating in the community forum:                               *
    *    http://www.FreeRTOS.org/support                                     *
    *                                                                         *
    ***************************************************************************

    http://www.FreeRTOS.org - Documentation, books, training, latest versions,
    license and Real Time Engineers Ltd. contact details.

    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
    compatible FAT file system, and our tiny thread aware UDP/IP stack.

    http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
    Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.

    http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High
    Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
    licenses offer ticketed support, indemnification and commercial middleware.

    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
    engineered and independently SIL3 certified version for use in safety and
    mission critical applications that require provable dependability.
*/
/* Standard includes. */
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif
/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */

/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED               ( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED      ( ( BaseType_t ) 0 )
/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex the pcHead and pcTail pointers
are not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder                    pcTail
#define uxQueueType                      pcHead
#define queueQUEUE_IS_MUTEX              NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME       ( ( TickType_t ) 0U )
#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
    int8_t *pcHead;                 /*< Points to the beginning of the queue storage area. */
    int8_t *pcTail;                 /*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items, and this byte is used as a marker. */
    int8_t *pcWriteTo;              /*< Points to the next free place in the storage area. */

    union                           /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
    {
        int8_t *pcReadFrom;         /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
        UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
    } u;

    List_t xTasksWaitingToSend;     /*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;  /*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
    UBaseType_t uxLength;           /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;         /*< The size of each item that the queue will hold. */

    volatile BaseType_t xRxLock;    /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile BaseType_t xTxLock;    /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition *pxQueueSetContainer;
    #endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;
/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to
    the new QueueRegistryItem_t name below to enable the use of older kernel
    aware debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
    /*
     * Checks to see if a queue is a member of a queue set, and if so, notifies
     * the queue set that the queue contains data.
     */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                                 \
    taskENTER_CRITICAL();                                       \
    {                                                           \
        if( ( pxQueue )->xRxLock == queueUNLOCKED )             \
        {                                                       \
            ( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;      \
        }                                                       \
        if( ( pxQueue )->xTxLock == queueUNLOCKED )             \
        {                                                       \
            ( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;      \
        }                                                       \
    }                                                           \
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
        pxQueue->xRxLock = queueUNLOCKED;
        pxQueue->xTxLock = queueUNLOCKED;

        if( xNewQueue == pdFALSE )
        {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                {
                    queueYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Ensure the event queues start in the correct state. */
            vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
/*-----------------------------------------------------------*/
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
QueueHandle_t xReturn = NULL;
int8_t *pcAllocatedBuffer;

    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* There is not going to be a queue storage area. */
        xQueueSizeInBytes = ( size_t ) 0;
    }
    else
    {
        /* The queue is one byte longer than asked for to make wrap checking
        easier. */
        xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
    }

    /* Allocate the new queue structure and storage area. */
    pcAllocatedBuffer = ( int8_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

    if( pcAllocatedBuffer != NULL )
    {
        pxNewQueue = ( Queue_t * ) pcAllocatedBuffer; /*lint !e826 MISRA The buffer cannot be too small because it was dimensioned by sizeof( Queue_t ) + xQueueSizeInBytes. */

        if( uxItemSize == ( UBaseType_t ) 0 )
        {
            /* No RAM was allocated for the queue storage area, but pcHead
            cannot be set to NULL because NULL is used as a key to say the queue
            is used as a mutex.  Therefore just set pcHead to point to the queue
            as a benign value that is known to be within the memory map. */
            pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
        }
        else
        {
            /* Jump past the queue structure to find the location of the queue
            storage area - adding the padding bytes to get a better alignment. */
            pxNewQueue->pcHead = pcAllocatedBuffer + sizeof( Queue_t );
        }

        /* Initialise the queue members as described above where the queue type
        is defined. */
        pxNewQueue->uxLength = uxQueueLength;
        pxNewQueue->uxItemSize = uxItemSize;
        ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

        #if ( configUSE_TRACE_FACILITY == 1 )
        {
            pxNewQueue->ucQueueType = ucQueueType;
        }
        #endif /* configUSE_TRACE_FACILITY */

        #if( configUSE_QUEUE_SETS == 1 )
        {
            pxNewQueue->pxQueueSetContainer = NULL;
        }
        #endif /* configUSE_QUEUE_SETS */

        traceQUEUE_CREATE( pxNewQueue );
        xReturn = pxNewQueue;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    configASSERT( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
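
/* A minimal usage sketch (illustrative only, not part of the kernel source).
It shows a queue of ten uint32_t items being created with the xQueueCreate()
macro from queue.h, which resolves to xQueueGenericCreate() above.  The
function name is hypothetical and the #if 0 guard keeps the sketch out of the
build. */
#if 0
    void vExampleCreateQueue( void )
    {
    QueueHandle_t xExampleQueue;

        /* Space for ten items of sizeof( uint32_t ) bytes each is allocated
        from the FreeRTOS heap, plus the one extra wrap-marker byte described
        above. */
        xExampleQueue = xQueueCreate( 10, sizeof( uint32_t ) );

        /* NULL is returned if there was insufficient heap remaining. */
        configASSERT( xExampleQueue != NULL );
    }
#endif
/*-----------------------------------------------------------*/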
#if ( configUSE_MUTEXES == 1 )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        /* Allocate the new queue structure. */
        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
        if( pxNewQueue != NULL )
        {
            /* Information required for priority inheritance. */
            pxNewQueue->pxMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* When a queue is used as a mutex no data is actually copied into
            or out of the queue, so the read and write pointers are not
            required. */
            pxNewQueue->pcWriteTo = NULL;
            pxNewQueue->u.pcReadFrom = NULL;

            /* Each mutex has a length of 1 (like a binary semaphore) and
            an item size of 0 as nothing is actually copied into or out
            of the mutex. */
            pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
            pxNewQueue->uxLength = ( UBaseType_t ) 1U;
            pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
            pxNewQueue->xRxLock = queueUNLOCKED;
            pxNewQueue->xTxLock = queueUNLOCKED;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                pxNewQueue->ucQueueType = ucQueueType;
            }
            #endif

            #if ( configUSE_QUEUE_SETS == 1 )
            {
                pxNewQueue->pxQueueSetContainer = NULL;
            }
            #endif

            /* Ensure the event queues start with the correct state. */
            vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }

        configASSERT( pxNewQueue );
        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
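
/* A minimal usage sketch (illustrative only, not part of the kernel source).
It assumes the application includes semphr.h, whose xSemaphoreCreateMutex(),
xSemaphoreTake() and xSemaphoreGive() macros wrap the queue functions in this
file.  The function name is hypothetical. */
#if 0
    void vExampleUseMutex( void )
    {
    SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();

        if( xMutex != NULL )
        {
            /* Block indefinitely until the mutex is obtained. */
            if( xSemaphoreTake( xMutex, portMAX_DELAY ) == pdPASS )
            {
                /* ... access the resource protected by the mutex ... */

                ( void ) xSemaphoreGive( xMutex );
            }
        }
    }
#endif
/*-----------------------------------------------------------*/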
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then pxMutexHolder will not
        change outside of this task.  If this task does not hold the mutex then
        pxMutexHolder can never coincidentally equal the task's handle, and as
        this is the only condition we are interested in it does not matter if
        pxMutexHolder is accessed simultaneously by another task.  Therefore no
        mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
            the task handle, therefore no underflow check is required.  Also,
            uxRecursiveCallCount is only modified by the mutex holder, and as
            there can only be one, no mutual exclusion is required to modify the
            uxRecursiveCallCount member. */
            ( pxMutex->u.uxRecursiveCallCount )--;

            /* Have we unwound the call count? */
            if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
            holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
        xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
        {
            ( pxMutex->u.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

            /* pdPASS will only be returned if the mutex was successfully
            obtained.  The calling task may have entered the Blocked state
            before reaching here. */
            if( xReturn == pdPASS )
            {
                ( pxMutex->u.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
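
/* A minimal usage sketch (illustrative only, not part of the kernel source).
It assumes the application includes semphr.h and builds with
configUSE_RECURSIVE_MUTEXES set to 1; the xSemaphoreCreateRecursiveMutex(),
xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros wrap the two
functions above.  The function name is hypothetical. */
#if 0
    void vExampleRecursiveMutex( void )
    {
    SemaphoreHandle_t xRecursiveMutex = xSemaphoreCreateRecursiveMutex();

        if( xRecursiveMutex != NULL )
        {
            /* The holding task can take the mutex more than once... */
            ( void ) xSemaphoreTakeRecursive( xRecursiveMutex, portMAX_DELAY );
            ( void ) xSemaphoreTakeRecursive( xRecursiveMutex, portMAX_DELAY );

            /* ...provided it gives the mutex back the same number of times
            before the mutex is actually released. */
            ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
            ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
        }
    }
#endif
/*-----------------------------------------------------------*/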
#if ( configUSE_COUNTING_SEMAPHORES == 1 )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            /* No data is copied into a semaphore, so the count is held
            directly in uxMessagesWaiting. */
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        configASSERT( xHandle );
        return xHandle;
    }

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/
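
/* A minimal usage sketch (illustrative only, not part of the kernel source).
It assumes semphr.h is included, whose xSemaphoreCreateCounting() macro wraps
xQueueCreateCountingSemaphore().  A counting semaphore with a maximum count of
5 and an initial count of 0 could, for example, count events latched by an
interrupt until a task has processed them.  The function name and the 100 tick
block time are arbitrary choices for the sketch. */
#if 0
    void vExampleCountingSemaphore( void )
    {
    SemaphoreHandle_t xEvents = xSemaphoreCreateCounting( 5, 0 );

        if( xEvents != NULL )
        {
            /* Block for up to 100 ticks waiting for an event to be counted. */
            if( xSemaphoreTake( xEvents, ( TickType_t ) 100 ) == pdPASS )
            {
                /* ... process one latched event ... */
            }
        }
    }
#endif
/*-----------------------------------------------------------*/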
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */

    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be
            the highest priority task wanting to access the queue.  If
            the head item in the queue is to be overwritten then it does
            not matter if the queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );
                xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                            {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            /* Return to the original privilege level before exiting the
            function. */
            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    }
}
/*-----------------------------------------------------------*/
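
/* A minimal usage sketch (illustrative only, not part of the kernel source).
xQueueSend() is the queue.h macro that calls xQueueGenericSend() with
queueSEND_TO_BACK.  The function name and ten tick block time are arbitrary
choices for the sketch. */
#if 0
    void vExampleSendToQueue( QueueHandle_t xQueue )
    {
    uint32_t ulValueToSend = 42UL;

        /* The item is queued by copy, so ulValueToSend can be reused (or go
        out of scope) as soon as the call returns. */
        if( xQueueSend( xQueue, &ulValueToSend, ( TickType_t ) 10 ) != pdPASS )
        {
            /* The queue was still full after blocking for ten ticks. */
        }
    }
#endif
/*-----------------------------------------------------------*/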
#if ( configUSE_ALTERNATIVE_API == 1 )

    BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
    {
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

        for( ;; )
        {
            taskENTER_CRITICAL();
            {
                /* Is there room on the queue now?  To be running we must be
                the highest priority task wanting to access the queue. */
                if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
                {
                    traceQUEUE_SEND( pxQueue );
                    prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately. */
                            portYIELD_WITHIN_API();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    taskEXIT_CRITICAL();
                    return pdPASS;
                }
                else
                {
                    if( xTicksToWait == ( TickType_t ) 0 )
                    {
                        taskEXIT_CRITICAL();
                        return errQUEUE_FULL;
                    }
                    else if( xEntryTimeSet == pdFALSE )
                    {
                        vTaskSetTimeOutState( &xTimeOut );
                        xEntryTimeSet = pdTRUE;
                    }
                }
            }
            taskEXIT_CRITICAL();

            taskENTER_CRITICAL();
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    if( prvIsQueueFull( pxQueue ) != pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
                        portYIELD_WITHIN_API();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    taskEXIT_CRITICAL();
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
            }
            taskEXIT_CRITICAL();
        }
    }

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/
#if ( configUSE_ALTERNATIVE_API == 1 )

    BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
    {
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    int8_t *pcOriginalReadPosition;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

        for( ;; )
        {
            taskENTER_CRITICAL();
            {
                if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
                {
                    /* Remember our read position in case we are just peeking. */
                    pcOriginalReadPosition = pxQueue->u.pcReadFrom;

                    prvCopyDataFromQueue( pxQueue, pvBuffer );

                    if( xJustPeeking == pdFALSE )
                    {
                        traceQUEUE_RECEIVE( pxQueue );

                        /* Data is actually being removed (not just peeked). */
                        --( pxQueue->uxMessagesWaiting );

                        #if ( configUSE_MUTEXES == 1 )
                        {
                            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                            {
                                /* Record the information required to implement
                                priority inheritance should it become necessary. */
                                pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #endif

                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                            {
                                portYIELD_WITHIN_API();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                    }
                    else
                    {
                        traceQUEUE_PEEK( pxQueue );

                        /* The data is not being removed, so reset our read
                        pointer. */
                        pxQueue->u.pcReadFrom = pcOriginalReadPosition;

                        /* The data is being left in the queue, so see if there are
                        any other tasks waiting for the data. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            /* Tasks that are removed from the event list will get added to
                            the pending ready list as the scheduler is still suspended. */
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority than this task. */
                                portYIELD_WITHIN_API();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    taskEXIT_CRITICAL();
                    return pdPASS;
                }
                else
                {
                    if( xTicksToWait == ( TickType_t ) 0 )
                    {
                        taskEXIT_CRITICAL();
                        traceQUEUE_RECEIVE_FAILED( pxQueue );
                        return errQUEUE_EMPTY;
                    }
                    else if( xEntryTimeSet == pdFALSE )
                    {
                        vTaskSetTimeOutState( &xTimeOut );
                        xEntryTimeSet = pdTRUE;
                    }
                }
            }
            taskEXIT_CRITICAL();

            taskENTER_CRITICAL();
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                        #if ( configUSE_MUTEXES == 1 )
                        {
                            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                            {
                                taskENTER_CRITICAL();
                                {
                                    vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                                }
                                taskEXIT_CRITICAL();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #endif

                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                        portYIELD_WITHIN_API();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
            }
            taskEXIT_CRITICAL();
        }
    }

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
    in the queue.  Also don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
            holder - and if there is a mutex holder then the mutex cannot be
            given from an ISR.  Therefore, unlike the xQueueGenericGive()
            function, there is no need to determine the need for priority
            disinheritance here or to clear the mutex holder TCB member. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( pxQueue->xTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                ++( pxQueue->xTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
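
/* A minimal usage sketch (illustrative only, not part of the kernel source).
xQueueSendFromISR() is the queue.h macro that calls xQueueGenericSendFromISR()
with queueSEND_TO_BACK.  portYIELD_FROM_ISR() (portEND_SWITCHING_ISR() on some
ports) is the port specific way of requesting the context switch on exit from
the handler; the handler, queue and peripheral names are hypothetical. */
#if 0
    QueueHandle_t xExampleQueue; /* Created elsewhere with xQueueCreate(). */

    void vExampleInterruptHandler( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    uint32_t ulStatus = 0;

        /* ... read ulStatus from hypothetical peripheral registers ... */

        /* Cannot block in an ISR, so the call fails immediately if the queue
        is full. */
        ( void ) xQueueSendFromISR( xExampleQueue, &ulStatus, &xHigherPriorityTaskWoken );

        /* If sending unblocked a task of higher priority than the one that
        was interrupted, request a context switch before exiting the ISR. */
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
#endif
/*-----------------------------------------------------------*/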
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    /* xQueueGenericSendFromISR() should be used if the item size is not 0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
    item size is 0.  Don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* When the queue is used to implement a semaphore no data is ever
        moved through the queue but it is still valid to see if the queue 'has
        space'. */
        if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
        {
            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
            holder - and if there is a mutex holder then the mutex cannot be
            given from an ISR.  Therefore, unlike the xQueueGenericGive()
            function, there is no need to determine the need for priority
            disinheritance here or to clear the mutex holder TCB member. */

            ++( pxQueue->uxMessagesWaiting );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( pxQueue->xTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
                        {
                            /* The semaphore is a member of a queue set, and
                            posting to the queue set caused a higher priority
                            task to unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                ++( pxQueue->xTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
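
/* A minimal usage sketch (illustrative only, not part of the kernel source).
It assumes semphr.h is included; xSemaphoreGiveFromISR() ends up in the
function above because a semaphore is a queue with an item size of zero.  The
handler and semaphore names are hypothetical. */
#if 0
    SemaphoreHandle_t xEventSemaphore; /* Created elsewhere, e.g. with xSemaphoreCreateBinary(). */

    void vExampleEventInterruptHandler( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        /* 'Give' the semaphore to unblock the task that processes the event. */
        ( void ) xSemaphoreGiveFromISR( xEventSemaphore, &xHigherPriorityTaskWoken );

        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
#endif
/*-----------------------------------------------------------*/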
BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */

    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Remember the read position in case the queue is only being
                peeked. */
                pcOriginalReadPosition = pxQueue->u.pcReadFrom;

                prvCopyDataFromQueue( pxQueue, pvBuffer );

                if( xJustPeeking == pdFALSE )
                {
                    traceQUEUE_RECEIVE( pxQueue );

                    /* Actually removing data, not just peeking. */
                    --( pxQueue->uxMessagesWaiting );

                    #if ( configUSE_MUTEXES == 1 )
                    {
                        if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                        {
                            /* Record the information required to implement
                            priority inheritance should it become necessary. */
                            pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* configUSE_MUTEXES */

                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                        {
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    traceQUEUE_PEEK( pxQueue );

                    /* The data is not being removed, so reset the read
                    pointer. */
                    pxQueue->u.pcReadFrom = pcOriginalReadPosition;

                    /* The data is being left in the queue, so see if there are
                    any other tasks waiting for the data. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        /* Tasks that are removed from the event list will get added to
                        the pending ready list as the scheduler is still suspended. */
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority than this task. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                #if ( configUSE_MUTEXES == 1 )
                {
                    if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                    {
                        taskENTER_CRITICAL();
                        {
                            vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                        }
                        taskEXIT_CRITICAL();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif

                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();
            traceQUEUE_RECEIVE_FAILED( pxQueue );
            return errQUEUE_EMPTY;
        }
    }
}
/*-----------------------------------------------------------*/
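
/* A minimal usage sketch (illustrative only, not part of the kernel source).
xQueueReceive() and xQueuePeek() are the queue.h macros that call
xQueueGenericReceive() with xJustPeeking set to pdFALSE and pdTRUE
respectively.  The function name and the 100 tick block time are arbitrary
choices for the sketch. */
#if 0
    void vExampleReceiveFromQueue( QueueHandle_t xQueue )
    {
    uint32_t ulReceivedValue;

        /* Peeking copies the item out but leaves it on the queue. */
        if( xQueuePeek( xQueue, &ulReceivedValue, ( TickType_t ) 0 ) == pdPASS )
        {
            /* ... inspect ulReceivedValue without consuming it ... */
        }

        /* Receiving removes the item from the queue. */
        if( xQueueReceive( xQueue, &ulReceivedValue, ( TickType_t ) 100 ) == pdPASS )
        {
            /* ... process ulReceivedValue ... */
        }
    }
#endif
/*-----------------------------------------------------------*/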
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

            prvCopyDataFromQueue( pxQueue, pvBuffer );
            --( pxQueue->uxMessagesWaiting );

            /* If the queue is locked the event list will not be modified.
            Instead update the lock count so the task that unlocks the queue
            will know that an ISR has removed data while the queue was
            locked. */
            if( pxQueue->xRxLock == queueUNLOCKED )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority than us so
                        force a context switch. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was removed while it was locked. */
                ++( pxQueue->xRxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
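
/* A minimal usage sketch (illustrative only, not part of the kernel source).
An ISR cannot block, so xQueueReceiveFromISR() simply returns pdFAIL if the
queue is empty.  Draining a queue from an ISR like this is typical of a
transmit interrupt that feeds a peripheral from queued data; the names are
hypothetical. */
#if 0
    QueueHandle_t xTxQueue; /* Created elsewhere with xQueueCreate(). */

    void vExampleTxInterruptHandler( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    uint8_t ucByteToSend;

        if( xQueueReceiveFromISR( xTxQueue, &ucByteToSend, &xHigherPriorityTaskWoken ) == pdPASS )
        {
            /* ... write ucByteToSend to the hypothetical peripheral ... */
        }

        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
#endif
/*-----------------------------------------------------------*/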
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_PEEK_FROM_ISR( pxQueue );

            /* Remember the read position so it can be reset as nothing is
            actually being removed from the queue. */
            pcOriginalReadPosition = pxQueue->u.pcReadFrom;
            prvCopyDataFromQueue( pxQueue, pvBuffer );
            pxQueue->u.pcReadFrom = pcOriginalReadPosition;

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t *pxQueue;

    pxQueue = ( Queue_t * ) xQueue;
    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/
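
/* A minimal usage sketch (illustrative only, not part of the kernel source).
The two task level query functions above take a critical section, so the
snapshot they return is coherent, but it can be stale by the time the caller
acts on it.  The function name is hypothetical. */
#if 0
    void vExampleQueryQueue( QueueHandle_t xQueue )
    {
        if( uxQueueSpacesAvailable( xQueue ) == ( UBaseType_t ) 0 )
        {
            /* The queue was full at the instant of the call - a subsequent
            send could still succeed if another task receives first. */
        }

        if( uxQueueMessagesWaiting( xQueue ) > ( UBaseType_t ) 0 )
        {
            /* At least one item was queued at the instant of the call. */
        }
    }
#endif
/*-----------------------------------------------------------*/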
void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    traceQUEUE_DELETE( pxQueue );
    #if ( configQUEUE_REGISTRY_SIZE > 0 )
    {
        vQueueUnregisterQueue( pxQueue );
    }
    #endif
    vPortFree( pxQueue );
}
/*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )

    UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
    {
        ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->ucQueueType;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;

	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
				pxQueue->pxMutexHolder = NULL;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize;
		if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		if( xPosition == queueOVERWRITE )
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--( pxQueue->uxMessagesWaiting );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

	++( pxQueue->uxMessagesWaiting );

	return xReturn;
}
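/* Example of the queueOVERWRITE path handled above - a minimal sketch, not
part of the kernel.  xQueueOverwrite() is intended for queues of length one
used as a mailbox; writing to a full mailbox simply replaces the stored
value, so the call never fails:

	QueueHandle_t xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );
	uint32_t ulVar = 10;

	( void ) xQueueOverwrite( xMailbox, &ulVar );
	ulVar = 20;
	( void ) xQueueOverwrite( xMailbox, &ulVar );

After both calls uxQueueMessagesWaiting( xMailbox ) is still 1, and the value
held in the mailbox is 20. */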
/*-----------------------------------------------------------*/
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
	if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
	{
		pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
	}
}
/*-----------------------------------------------------------*/
static void prvUnlockQueue( Queue_t * const pxQueue )
{
	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

	/* The lock counts contain the number of extra data items placed or
	removed from the queue while the queue was locked.  When a queue is
	locked items can be added or removed, but the event lists cannot be
	updated. */
	taskENTER_CRITICAL();
	{
		/* See if data was added to the queue while it was locked. */
		while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
		{
			/* Data was posted while the queue was locked.  Are any tasks
			blocked waiting for data to become available? */
			#if ( configUSE_QUEUE_SETS == 1 )
			{
				if( pxQueue->pxQueueSetContainer != NULL )
				{
					if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
					{
						/* The queue is a member of a queue set, and posting to
						the queue set caused a higher priority task to unblock.
						A context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* Tasks that are removed from the event list will get added to
					the pending ready list as the scheduler is still suspended. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							vTaskMissedYield();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						break;
					}
				}
			}
			#else /* configUSE_QUEUE_SETS */
			{
				/* Tasks that are removed from the event list will get added to
				the pending ready list as the scheduler is still suspended. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority so record that a
						context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					break;
				}
			}
			#endif /* configUSE_QUEUE_SETS */

			--( pxQueue->xTxLock );
		}

		pxQueue->xTxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();

	/* Do the same for the Rx lock. */
	taskENTER_CRITICAL();
	{
		while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
		{
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					vTaskMissedYield();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				--( pxQueue->xRxLock );
			}
			else
			{
				break;
			}
		}

		pxQueue->xRxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();
}
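/* For reference: prvUnlockQueue() is always paired with prvLockQueue() and a
suspended scheduler, following the pattern used by the blocking calls in this
file (a simplified sketch only, not the complete logic):

	vTaskSuspendAll();
	prvLockQueue( pxQueue );

	check the queue and, if necessary, place the calling task on one of the
	queue's event lists...

	prvUnlockQueue( pxQueue );
	( void ) xTaskResumeAll();

Locking stops interrupts from walking the event lists, while the Tx/Rx lock
counts record how many items interrupts added or removed so any waiting
tasks can be woken here once the lists can safely be updated. */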
/*-----------------------------------------------------------*/
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/
BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/
BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a co-routine we cannot block directly,
					but return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
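/* xQueueCRSend() is not called by application code directly - co-routines
are expected to use the crQUEUE_SEND() macro, which wraps it.  A minimal
sketch (the co-routine name prvExampleCoRoutine and the handle
xCoRoutineQueue are hypothetical; note that co-routine locals must be static
to survive a blocking point):

	static void prvExampleCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
	{
	static BaseType_t xResult;
	static UBaseType_t uxValueToPost = 0;

		crSTART( xHandle );

		for( ;; )
		{
			crQUEUE_SEND( xHandle, xCoRoutineQueue, &uxValueToPost, 0, &xResult );
			uxValueToPost++;
		}

		crEND();
	}

crQUEUE_RECEIVE() wraps xQueueCRReceive() below in the same way. */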
/*-----------------------------------------------------------*/
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
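/* A sketch of how xQueueCRSendFromISR() is normally reached via the
crQUEUE_SEND_FROM_ISR() macro - the ISR name, queue handle and data source
below are all hypothetical.  The return value is fed back in on each call so
that at most one co-routine is woken however many items one interrupt posts:

	void vExampleRxISR( void )
	{
	char cRxedChar;
	BaseType_t xCRWokenByPost = pdFALSE;

		while( received data is available )
		{
			cRxedChar = the next received character;
			xCRWokenByPost = crQUEUE_SEND_FROM_ISR( xCommsRxQueue, &cRxedChar, xCRWokenByPost );
		}
	}
*/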
/*-----------------------------------------------------------*/
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available.  If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
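/* Example usage - a minimal sketch with a hypothetical handle and name.
Registering a queue or semaphore gives it a name that kernel aware debuggers
can display:

	QueueHandle_t xTxQueue = xQueueCreate( 16, sizeof( char ) );

	vQueueAddToRegistry( xTxQueue, "TxQueue" );

Note from the code above that only the pointer to the name is stored, not a
copy of the string, so the string (normally a literal) must remain valid for
as long as the queue stays registered. */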
/*-----------------------------------------------------------*/
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered is actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot is free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
#if ( configUSE_TIMERS == 1 )

	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code, hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xReturn = pdFAIL;
			}
			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xReturn = pdFAIL;
			}
			else
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
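/* Example of the queue set API used together - a minimal sketch, not part of
the kernel, with hypothetical handles.  The set must be created with a length
at least equal to the sum of the lengths of its member queues, and members
must be empty when they are added:

	QueueHandle_t xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
	QueueHandle_t xQueue2 = xQueueCreate( 10, sizeof( uint32_t ) );
	QueueSetHandle_t xSet = xQueueCreateSet( 20 );
	QueueSetMemberHandle_t xActivated;
	uint32_t ulReceived;

	( void ) xQueueAddToSet( xQueue1, xSet );
	( void ) xQueueAddToSet( xQueue2, xSet );

	for( ;; )
	{
		xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );

		if( xActivated != NULL )
		{
			( void ) xQueueReceive( ( QueueHandle_t ) xActivated, &ulReceived, 0 );
		}
	}

The read from the member queue uses a zero block time because the handle was
only returned by the set because data is already available on that queue. */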
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called from a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );

			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
				{
					/* The task waiting has a higher priority. */
					xReturn = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */