2 * FreeRTOS Kernel V10.0.0
\r
3 * Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
\r
5 * Permission is hereby granted, free of charge, to any person obtaining a copy of
\r
6 * this software and associated documentation files (the "Software"), to deal in
\r
7 * the Software without restriction, including without limitation the rights to
\r
8 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
\r
9 * the Software, and to permit persons to whom the Software is furnished to do so,
\r
10 * subject to the following conditions:
\r
12 * The above copyright notice and this permission notice shall be included in all
\r
13 * copies or substantial portions of the Software. If you wish to use our Amazon
\r
14 * FreeRTOS name, please do so in a fair use way that does not cause confusion.
\r
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
\r
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
\r
18 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
\r
19 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
\r
20 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
\r
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
23 * http://www.FreeRTOS.org
\r
24 * http://aws.amazon.com/freertos
\r
26 * 1 tab == 4 spaces!
\r
/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
\r
#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif
\r
/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
\r
/* Constants used with the cRxLock and cTxLock structure members.  A lock
value of queueUNLOCKED means the queue is not locked; a non-negative value
counts items added/removed while locked. */
#define queueUNLOCKED					( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( int8_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL
\r
/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( TickType_t ) 0U )
\r
#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
\r
85 * Definition of the queue used by the scheduler.
\r
86 * Items are queued by copy, not reference. See the following link for the
\r
87 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
\r
89 typedef struct QueueDefinition
\r
91 int8_t *pcHead; /*< Points to the beginning of the queue storage area. */
\r
92 int8_t *pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
\r
93 int8_t *pcWriteTo; /*< Points to the free next place in the storage area. */
\r
95 union /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
\r
97 int8_t *pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
\r
98 UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
\r
101 List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
\r
102 List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
\r
104 volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
\r
105 UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
\r
106 UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */
\r
108 volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
\r
109 volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
\r
111 #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
\r
112 uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
\r
115 #if ( configUSE_QUEUE_SETS == 1 )
\r
116 struct QueueDefinition *pxQueueSetContainer;
\r
119 #if ( configUSE_TRACE_FACILITY == 1 )
\r
120 UBaseType_t uxQueueNumber;
\r
121 uint8_t ucQueueType;
\r
126 /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
\r
127 name below to enable the use of older kernel aware debuggers. */
\r
128 typedef xQUEUE Queue_t;
\r
130 /*-----------------------------------------------------------*/
\r
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to the
    new xQueueRegistryItem name below to enable the use of older kernel aware
    debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
\r
160 * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not
\r
161 * prevent an ISR from adding or removing items to the queue, but does prevent
\r
162 * an ISR from removing tasks from the queue event lists. If an ISR finds a
\r
163 * queue is locked it will instead increment the appropriate queue lock count
\r
164 * to indicate that a task may require unblocking. When the queue in unlocked
\r
165 * these lock counts are inspected, and the appropriate action taken.
\r
167 static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
\r
170 * Uses a critical section to determine if there is any data in a queue.
\r
172 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
\r
174 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
\r
177 * Uses a critical section to determine if there is any space in a queue.
\r
179 * @return pdTRUE if there is no space, otherwise pdFALSE;
\r
181 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
\r
184 * Copies an item into the queue, either at the front of the queue or the
\r
185 * back of the queue.
\r
187 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
\r
190 * Copies an item out of a queue.
\r
192 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;
\r
194 #if ( configUSE_QUEUE_SETS == 1 )
\r
196 * Checks to see if a queue is a member of a queue set, and if so, notifies
\r
197 * the queue set that the queue contains data.
\r
199 static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
\r
203 * Called after a Queue_t structure has been allocated either statically or
\r
204 * dynamically to fill in the structure's members.
\r
206 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
\r
209 * Mutexes are a special type of queue. When a mutex is created, first the
\r
210 * queue is created, then prvInitialiseMutex() is called to configure the queue
\r
213 #if( configUSE_MUTEXES == 1 )
\r
214 static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
\r
217 #if( configUSE_MUTEXES == 1 )
\r
219 * If a task waiting for a mutex causes the mutex holder to inherit a
\r
220 * priority, but the waiting task times out, then the holder should
\r
221 * disinherit the priority - but only down to the highest priority of any
\r
222 * other tasks that are waiting for the same mutex. This function returns
\r
225 static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
\r
227 /*-----------------------------------------------------------*/
\r
230 * Macro to mark a queue as locked. Locking a queue prevents an ISR from
\r
231 * accessing the queue event lists.
\r
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )								\
    taskENTER_CRITICAL();									\
    {														\
        if( ( pxQueue )->cRxLock == queueUNLOCKED )			\
        {													\
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED;	\
        }													\
        if( ( pxQueue )->cTxLock == queueUNLOCKED )			\
        {													\
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED;	\
        }													\
    }														\
    taskEXIT_CRITICAL()
\r
246 /*-----------------------------------------------------------*/
\r
248 BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
\r
250 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
252 configASSERT( pxQueue );
\r
254 taskENTER_CRITICAL();
\r
256 pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
\r
257 pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
\r
258 pxQueue->pcWriteTo = pxQueue->pcHead;
\r
259 pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
\r
260 pxQueue->cRxLock = queueUNLOCKED;
\r
261 pxQueue->cTxLock = queueUNLOCKED;
\r
263 if( xNewQueue == pdFALSE )
\r
265 /* If there are tasks blocked waiting to read from the queue, then
\r
266 the tasks will remain blocked as after this function exits the queue
\r
267 will still be empty. If there are tasks blocked waiting to write to
\r
268 the queue, then one should be unblocked as after this function exits
\r
269 it will be possible to write to it. */
\r
270 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
272 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
\r
274 queueYIELD_IF_USING_PREEMPTION();
\r
278 mtCOVERAGE_TEST_MARKER();
\r
283 mtCOVERAGE_TEST_MARKER();
\r
288 /* Ensure the event queues start in the correct state. */
\r
289 vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
\r
290 vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
\r
293 taskEXIT_CRITICAL();
\r
295 /* A value is returned for calling semantic consistency with previous
\r
299 /*-----------------------------------------------------------*/
\r
#if( configSUPPORT_STATIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;

        configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

        /* The StaticQueue_t structure and the queue storage area must be
        supplied. */
        configASSERT( pxStaticQueue != NULL );

        /* A queue storage area should be provided if the item size is not 0, and
        should not be provided if the item size is 0. */
        configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) );
        configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) );

        #if( configASSERT_DEFINED == 1 )
        {
            /* Sanity check that the size of the structure used to declare a
            variable of type StaticQueue_t or StaticSemaphore_t equals the size of
            the real queue and semaphore structures. */
            volatile size_t xSize = sizeof( StaticQueue_t );
            configASSERT( xSize == sizeof( Queue_t ) );
        }
        #endif /* configASSERT_DEFINED */

        /* The address of a statically allocated queue was passed in, use it.
        The address of a statically allocated storage area was also passed in
        but is already set. */
        pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */

        if( pxNewQueue != NULL )
        {
            #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
            {
                /* Queues can be allocated either statically or dynamically, so
                note this queue was allocated statically in case the queue is
                later deleted. */
                pxNewQueue->ucStaticallyAllocated = pdTRUE;
            }
            #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            traceQUEUE_CREATE_FAILED( ucQueueType );
        }

        return pxNewQueue;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
\r
355 /*-----------------------------------------------------------*/
\r
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;
    size_t xQueueSizeInBytes;
    uint8_t *pucQueueStorage;

        configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

        if( uxItemSize == ( UBaseType_t ) 0 )
        {
            /* There is not going to be a queue storage area. */
            xQueueSizeInBytes = ( size_t ) 0;
        }
        else
        {
            /* Allocate enough space to hold the maximum number of items that
            can be in the queue at any time. */
            xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
        }

        /* The queue structure and the storage area are allocated in a single
        block so the storage follows the structure in memory. */
        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

        if( pxNewQueue != NULL )
        {
            /* Jump past the queue structure to find the location of the queue
            storage area. */
            pucQueueStorage = ( ( uint8_t * ) pxNewQueue ) + sizeof( Queue_t );

            #if( configSUPPORT_STATIC_ALLOCATION == 1 )
            {
                /* Queues can be created either statically or dynamically, so
                note this task was created dynamically in case it is later
                deleted. */
                pxNewQueue->ucStaticallyAllocated = pdFALSE;
            }
            #endif /* configSUPPORT_STATIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            traceQUEUE_CREATE_FAILED( ucQueueType );
        }

        return pxNewQueue;
    }

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
\r
407 /*-----------------------------------------------------------*/
\r
409 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
\r
411 /* Remove compiler warnings about unused parameters should
\r
412 configUSE_TRACE_FACILITY not be set to 1. */
\r
413 ( void ) ucQueueType;
\r
415 if( uxItemSize == ( UBaseType_t ) 0 )
\r
417 /* No RAM was allocated for the queue storage area, but PC head cannot
\r
418 be set to NULL because NULL is used as a key to say the queue is used as
\r
419 a mutex. Therefore just set pcHead to point to the queue as a benign
\r
420 value that is known to be within the memory map. */
\r
421 pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
\r
425 /* Set the head to the start of the queue storage area. */
\r
426 pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
\r
429 /* Initialise the queue members as described where the queue type is
\r
431 pxNewQueue->uxLength = uxQueueLength;
\r
432 pxNewQueue->uxItemSize = uxItemSize;
\r
433 ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
\r
435 #if ( configUSE_TRACE_FACILITY == 1 )
\r
437 pxNewQueue->ucQueueType = ucQueueType;
\r
439 #endif /* configUSE_TRACE_FACILITY */
\r
441 #if( configUSE_QUEUE_SETS == 1 )
\r
443 pxNewQueue->pxQueueSetContainer = NULL;
\r
445 #endif /* configUSE_QUEUE_SETS */
\r
447 traceQUEUE_CREATE( pxNewQueue );
\r
449 /*-----------------------------------------------------------*/
\r
#if( configUSE_MUTEXES == 1 )

    static void prvInitialiseMutex( Queue_t *pxNewQueue )
    {
        if( pxNewQueue != NULL )
        {
            /* The queue create function will set all the queue structure members
            correctly for a generic queue, but this function is creating a
            mutex.  Overwrite those members that need to be set differently -
            in particular the information required for priority inheritance. */
            pxNewQueue->pxMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* In case this is a recursive mutex. */
            pxNewQueue->u.uxRecursiveCallCount = 0;

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }
    }

#endif /* configUSE_MUTEXES */
\r
479 /*-----------------------------------------------------------*/
\r
#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        /* A mutex is a queue of length 1 with a zero item size; the mutex
        specific members are then configured by prvInitialiseMutex(). */
        pxNewQueue = ( Queue_t * ) xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
        prvInitialiseMutex( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
\r
495 /*-----------------------------------------------------------*/
\r
#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue )
    {
    Queue_t *pxNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        /* A mutex is a queue of length 1 with a zero item size, created over
        the caller supplied StaticQueue_t storage. */
        pxNewQueue = ( Queue_t * ) xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
        prvInitialiseMutex( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
\r
515 /*-----------------------------------------------------------*/
\r
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
            }
            else
            {
                /* Not a mutex, so there is no holder to return. */
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
\r
545 /*-----------------------------------------------------------*/
\r
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        configASSERT( xSemaphore );

        /* Mutexes cannot be used in interrupt service routines, so the mutex
        holder should not change in an ISR, and therefore a critical section is
        not required here. */
        if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
        {
            pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
        }
        else
        {
            /* Not a mutex, so there is no holder to return. */
            pxReturn = NULL;
        }

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
\r
571 /*-----------------------------------------------------------*/
\r
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then pxMutexHolder will not
        change outside of this task.  If this task does not hold the mutex then
        pxMutexHolder can never coincidentally equal the tasks handle, and as
        this is the only condition we are interested in it does not matter if
        pxMutexHolder is accessed simultaneously by another task.  Therefore no
        mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
            the task handle, therefore no underflow check is required.  Also,
            uxRecursiveCallCount is only modified by the mutex holder, and as
            there can only be one, no mutual exclusion is required to modify the
            uxRecursiveCallCount member. */
            ( pxMutex->u.uxRecursiveCallCount )--;

            /* Has the recursive call count unwound to 0? */
            if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
            holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
\r
626 /*-----------------------------------------------------------*/
\r
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
        xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
        {
            /* The calling task already holds the mutex, so this is a recursive
            take - just bump the recursion count. */
            ( pxMutex->u.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );

            /* pdPASS will only be returned if the mutex was successfully
            obtained.  The calling task may have entered the Blocked state
            before reaching here. */
            if( xReturn != pdFAIL )
            {
                ( pxMutex->u.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
\r
668 /*-----------------------------------------------------------*/
\r
#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            /* A counting semaphore's count is held in the queue's "messages
            waiting" member, so seed it with the initial count. */
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
\r
696 /*-----------------------------------------------------------*/
\r
#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            /* A counting semaphore's count is held in the queue's "messages
            waiting" member, so seed it with the initial count. */
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
\r
724 /*-----------------------------------------------------------*/
\r
726 BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
\r
728 BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
\r
729 TimeOut_t xTimeOut;
\r
730 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
732 configASSERT( pxQueue );
\r
733 configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
734 configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
\r
735 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
\r
737 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
\r
742 /* This function relaxes the coding standard somewhat to allow return
\r
743 statements within the function itself. This is done in the interest
\r
744 of execution time efficiency. */
\r
747 taskENTER_CRITICAL();
\r
749 /* Is there room on the queue now? The running task must be the
\r
750 highest priority task wanting to access the queue. If the head item
\r
751 in the queue is to be overwritten then it does not matter if the
\r
753 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
\r
755 traceQUEUE_SEND( pxQueue );
\r
756 xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
\r
758 #if ( configUSE_QUEUE_SETS == 1 )
\r
760 if( pxQueue->pxQueueSetContainer != NULL )
\r
762 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
\r
764 /* The queue is a member of a queue set, and posting
\r
765 to the queue set caused a higher priority task to
\r
766 unblock. A context switch is required. */
\r
767 queueYIELD_IF_USING_PREEMPTION();
\r
771 mtCOVERAGE_TEST_MARKER();
\r
776 /* If there was a task waiting for data to arrive on the
\r
777 queue then unblock it now. */
\r
778 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
780 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
782 /* The unblocked task has a priority higher than
\r
783 our own so yield immediately. Yes it is ok to
\r
784 do this from within the critical section - the
\r
785 kernel takes care of that. */
\r
786 queueYIELD_IF_USING_PREEMPTION();
\r
790 mtCOVERAGE_TEST_MARKER();
\r
793 else if( xYieldRequired != pdFALSE )
\r
795 /* This path is a special case that will only get
\r
796 executed if the task was holding multiple mutexes
\r
797 and the mutexes were given back in an order that is
\r
798 different to that in which they were taken. */
\r
799 queueYIELD_IF_USING_PREEMPTION();
\r
803 mtCOVERAGE_TEST_MARKER();
\r
807 #else /* configUSE_QUEUE_SETS */
\r
809 /* If there was a task waiting for data to arrive on the
\r
810 queue then unblock it now. */
\r
811 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
813 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
815 /* The unblocked task has a priority higher than
\r
816 our own so yield immediately. Yes it is ok to do
\r
817 this from within the critical section - the kernel
\r
818 takes care of that. */
\r
819 queueYIELD_IF_USING_PREEMPTION();
\r
823 mtCOVERAGE_TEST_MARKER();
\r
826 else if( xYieldRequired != pdFALSE )
\r
828 /* This path is a special case that will only get
\r
829 executed if the task was holding multiple mutexes and
\r
830 the mutexes were given back in an order that is
\r
831 different to that in which they were taken. */
\r
832 queueYIELD_IF_USING_PREEMPTION();
\r
836 mtCOVERAGE_TEST_MARKER();
\r
839 #endif /* configUSE_QUEUE_SETS */
\r
841 taskEXIT_CRITICAL();
\r
846 if( xTicksToWait == ( TickType_t ) 0 )
\r
848 /* The queue was full and no block time is specified (or
\r
849 the block time has expired) so leave now. */
\r
850 taskEXIT_CRITICAL();
\r
852 /* Return to the original privilege level before exiting
\r
854 traceQUEUE_SEND_FAILED( pxQueue );
\r
855 return errQUEUE_FULL;
\r
857 else if( xEntryTimeSet == pdFALSE )
\r
859 /* The queue was full and a block time was specified so
\r
860 configure the timeout structure. */
\r
861 vTaskInternalSetTimeOutState( &xTimeOut );
\r
862 xEntryTimeSet = pdTRUE;
\r
866 /* Entry time was already set. */
\r
867 mtCOVERAGE_TEST_MARKER();
\r
871 taskEXIT_CRITICAL();
\r
873 /* Interrupts and other tasks can send to and receive from the queue
\r
874 now the critical section has been exited. */
\r
877 prvLockQueue( pxQueue );
\r
879 /* Update the timeout state to see if it has expired yet. */
\r
880 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
882 if( prvIsQueueFull( pxQueue ) != pdFALSE )
\r
884 traceBLOCKING_ON_QUEUE_SEND( pxQueue );
\r
885 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
\r
887 /* Unlocking the queue means queue events can effect the
\r
888 event list. It is possible that interrupts occurring now
\r
889 remove this task from the event list again - but as the
\r
890 scheduler is suspended the task will go onto the pending
\r
891 ready last instead of the actual ready list. */
\r
892 prvUnlockQueue( pxQueue );
\r
894 /* Resuming the scheduler will move tasks from the pending
\r
895 ready list into the ready list - so it is feasible that this
\r
896 task is already in a ready list before it yields - in which
\r
897 case the yield will not cause a context switch unless there
\r
898 is also a higher priority task in the pending ready list. */
\r
899 if( xTaskResumeAll() == pdFALSE )
\r
901 portYIELD_WITHIN_API();
\r
907 prvUnlockQueue( pxQueue );
\r
908 ( void ) xTaskResumeAll();
\r
913 /* The timeout has expired. */
\r
914 prvUnlockQueue( pxQueue );
\r
915 ( void ) xTaskResumeAll();
\r
917 traceQUEUE_SEND_FAILED( pxQueue );
\r
918 return errQUEUE_FULL;
\r
922 /*-----------------------------------------------------------*/
\r
924 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
\r
926 BaseType_t xReturn;
\r
927 UBaseType_t uxSavedInterruptStatus;
\r
928 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
930 configASSERT( pxQueue );
\r
931 configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
932 configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
\r
934 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
935 system call (or maximum API call) interrupt priority. Interrupts that are
\r
936 above the maximum system call priority are kept permanently enabled, even
\r
937 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
938 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
939 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
940 failure if a FreeRTOS API function is called from an interrupt that has been
\r
941 assigned a priority above the configured maximum system call priority.
\r
942 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
943 that have been assigned a priority at or (logically) below the maximum
\r
944 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
945 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
946 More information (albeit Cortex-M specific) is provided on the following
\r
947 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
948 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
950 /* Similar to xQueueGenericSend, except without blocking if there is no room
\r
951 in the queue. Also don't directly wake a task that was blocked on a queue
\r
952 read, instead return a flag to say whether a context switch is required or
\r
953 not (i.e. has a task with a higher priority than us been woken by this
\r
955 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
957 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
\r
959 const int8_t cTxLock = pxQueue->cTxLock;
\r
961 traceQUEUE_SEND_FROM_ISR( pxQueue );
\r
963 /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
\r
964 semaphore or mutex. That means prvCopyDataToQueue() cannot result
\r
965 in a task disinheriting a priority and prvCopyDataToQueue() can be
\r
966 called here even though the disinherit function does not check if
\r
967 the scheduler is suspended before accessing the ready lists. */
\r
968 ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
\r
970 /* The event list is not altered if the queue is locked. This will
\r
971 be done when the queue is unlocked later. */
\r
972 if( cTxLock == queueUNLOCKED )
\r
974 #if ( configUSE_QUEUE_SETS == 1 )
\r
976 if( pxQueue->pxQueueSetContainer != NULL )
\r
978 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
\r
980 /* The queue is a member of a queue set, and posting
\r
981 to the queue set caused a higher priority task to
\r
982 unblock. A context switch is required. */
\r
983 if( pxHigherPriorityTaskWoken != NULL )
\r
985 *pxHigherPriorityTaskWoken = pdTRUE;
\r
989 mtCOVERAGE_TEST_MARKER();
\r
994 mtCOVERAGE_TEST_MARKER();
\r
999 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1001 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1003 /* The task waiting has a higher priority so
\r
1004 record that a context switch is required. */
\r
1005 if( pxHigherPriorityTaskWoken != NULL )
\r
1007 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1011 mtCOVERAGE_TEST_MARKER();
\r
1016 mtCOVERAGE_TEST_MARKER();
\r
1021 mtCOVERAGE_TEST_MARKER();
\r
1025 #else /* configUSE_QUEUE_SETS */
\r
1027 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1029 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1031 /* The task waiting has a higher priority so record that a
\r
1032 context switch is required. */
\r
1033 if( pxHigherPriorityTaskWoken != NULL )
\r
1035 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1039 mtCOVERAGE_TEST_MARKER();
\r
1044 mtCOVERAGE_TEST_MARKER();
\r
1049 mtCOVERAGE_TEST_MARKER();
\r
1052 #endif /* configUSE_QUEUE_SETS */
\r
1056 /* Increment the lock count so the task that unlocks the queue
\r
1057 knows that data was posted while it was locked. */
\r
1058 pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
\r
1065 traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
\r
1066 xReturn = errQUEUE_FULL;
\r
1069 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1073 /*-----------------------------------------------------------*/
\r
1075 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
\r
1077 BaseType_t xReturn;
\r
1078 UBaseType_t uxSavedInterruptStatus;
\r
1079 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1081 /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
\r
1082 item size is 0. Don't directly wake a task that was blocked on a queue
\r
1083 read, instead return a flag to say whether a context switch is required or
\r
1084 not (i.e. has a task with a higher priority than us been woken by this
\r
1087 configASSERT( pxQueue );
\r
1089 /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
\r
1090 if the item size is not 0. */
\r
1091 configASSERT( pxQueue->uxItemSize == 0 );
\r
1093 /* Normally a mutex would not be given from an interrupt, especially if
\r
1094 there is a mutex holder, as priority inheritance makes no sense for an
\r
1095 interrupts, only tasks. */
\r
1096 configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );
\r
1098 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
1099 system call (or maximum API call) interrupt priority. Interrupts that are
\r
1100 above the maximum system call priority are kept permanently enabled, even
\r
1101 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
1102 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
1103 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1104 failure if a FreeRTOS API function is called from an interrupt that has been
\r
1105 assigned a priority above the configured maximum system call priority.
\r
1106 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
1107 that have been assigned a priority at or (logically) below the maximum
\r
1108 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
1109 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
1110 More information (albeit Cortex-M specific) is provided on the following
\r
1111 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1112 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1114 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1116 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
\r
1118 /* When the queue is used to implement a semaphore no data is ever
\r
1119 moved through the queue but it is still valid to see if the queue 'has
\r
1121 if( uxMessagesWaiting < pxQueue->uxLength )
\r
1123 const int8_t cTxLock = pxQueue->cTxLock;
\r
1125 traceQUEUE_SEND_FROM_ISR( pxQueue );
\r
1127 /* A task can only have an inherited priority if it is a mutex
\r
1128 holder - and if there is a mutex holder then the mutex cannot be
\r
1129 given from an ISR. As this is the ISR version of the function it
\r
1130 can be assumed there is no mutex holder and no need to determine if
\r
1131 priority disinheritance is needed. Simply increase the count of
\r
1132 messages (semaphores) available. */
\r
1133 pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;
\r
1135 /* The event list is not altered if the queue is locked. This will
\r
1136 be done when the queue is unlocked later. */
\r
1137 if( cTxLock == queueUNLOCKED )
\r
1139 #if ( configUSE_QUEUE_SETS == 1 )
\r
1141 if( pxQueue->pxQueueSetContainer != NULL )
\r
1143 if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
\r
1145 /* The semaphore is a member of a queue set, and
\r
1146 posting to the queue set caused a higher priority
\r
1147 task to unblock. A context switch is required. */
\r
1148 if( pxHigherPriorityTaskWoken != NULL )
\r
1150 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1154 mtCOVERAGE_TEST_MARKER();
\r
1159 mtCOVERAGE_TEST_MARKER();
\r
1164 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1166 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1168 /* The task waiting has a higher priority so
\r
1169 record that a context switch is required. */
\r
1170 if( pxHigherPriorityTaskWoken != NULL )
\r
1172 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1176 mtCOVERAGE_TEST_MARKER();
\r
1181 mtCOVERAGE_TEST_MARKER();
\r
1186 mtCOVERAGE_TEST_MARKER();
\r
1190 #else /* configUSE_QUEUE_SETS */
\r
1192 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1194 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1196 /* The task waiting has a higher priority so record that a
\r
1197 context switch is required. */
\r
1198 if( pxHigherPriorityTaskWoken != NULL )
\r
1200 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1204 mtCOVERAGE_TEST_MARKER();
\r
1209 mtCOVERAGE_TEST_MARKER();
\r
1214 mtCOVERAGE_TEST_MARKER();
\r
1217 #endif /* configUSE_QUEUE_SETS */
\r
1221 /* Increment the lock count so the task that unlocks the queue
\r
1222 knows that data was posted while it was locked. */
\r
1223 pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
\r
1230 traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
\r
1231 xReturn = errQUEUE_FULL;
\r
1234 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1238 /*-----------------------------------------------------------*/
\r
1240 BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
\r
1242 BaseType_t xEntryTimeSet = pdFALSE;
\r
1243 TimeOut_t xTimeOut;
\r
1244 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1246 /* Check the pointer is not NULL. */
\r
1247 configASSERT( ( pxQueue ) );
\r
1249 /* The buffer into which data is received can only be NULL if the data size
\r
1250 is zero (so no data is copied into the buffer. */
\r
1251 configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
1253 /* Cannot block if the scheduler is suspended. */
\r
1254 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
\r
1256 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
\r
1261 /* This function relaxes the coding standard somewhat to allow return
\r
1262 statements within the function itself. This is done in the interest
\r
1263 of execution time efficiency. */
\r
1267 taskENTER_CRITICAL();
\r
1269 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
\r
1271 /* Is there data in the queue now? To be running the calling task
\r
1272 must be the highest priority task wanting to access the queue. */
\r
1273 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
1275 /* Data available, remove one item. */
\r
1276 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1277 traceQUEUE_RECEIVE( pxQueue );
\r
1278 pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;
\r
1280 /* There is now space in the queue, were any tasks waiting to
\r
1281 post to the queue? If so, unblock the highest priority waiting
\r
1283 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
1285 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
\r
1287 queueYIELD_IF_USING_PREEMPTION();
\r
1291 mtCOVERAGE_TEST_MARKER();
\r
1296 mtCOVERAGE_TEST_MARKER();
\r
1299 taskEXIT_CRITICAL();
\r
1304 if( xTicksToWait == ( TickType_t ) 0 )
\r
1306 /* The queue was empty and no block time is specified (or
\r
1307 the block time has expired) so leave now. */
\r
1308 taskEXIT_CRITICAL();
\r
1309 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
1310 return errQUEUE_EMPTY;
\r
1312 else if( xEntryTimeSet == pdFALSE )
\r
1314 /* The queue was empty and a block time was specified so
\r
1315 configure the timeout structure. */
\r
1316 vTaskInternalSetTimeOutState( &xTimeOut );
\r
1317 xEntryTimeSet = pdTRUE;
\r
1321 /* Entry time was already set. */
\r
1322 mtCOVERAGE_TEST_MARKER();
\r
1326 taskEXIT_CRITICAL();
\r
1328 /* Interrupts and other tasks can send to and receive from the queue
\r
1329 now the critical section has been exited. */
\r
1331 vTaskSuspendAll();
\r
1332 prvLockQueue( pxQueue );
\r
1334 /* Update the timeout state to see if it has expired yet. */
\r
1335 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
1337 /* The timeout has not expired. If the queue is still empty place
\r
1338 the task on the list of tasks waiting to receive from the queue. */
\r
1339 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
\r
1341 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
\r
1342 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
\r
1343 prvUnlockQueue( pxQueue );
\r
1344 if( xTaskResumeAll() == pdFALSE )
\r
1346 portYIELD_WITHIN_API();
\r
1350 mtCOVERAGE_TEST_MARKER();
\r
1355 /* The queue contains data again. Loop back to try and read the
\r
1357 prvUnlockQueue( pxQueue );
\r
1358 ( void ) xTaskResumeAll();
\r
1363 /* Timed out. If there is no data in the queue exit, otherwise loop
\r
1364 back and attempt to read the data. */
\r
1365 prvUnlockQueue( pxQueue );
\r
1366 ( void ) xTaskResumeAll();
\r
1368 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
\r
1370 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
1371 return errQUEUE_EMPTY;
\r
1375 mtCOVERAGE_TEST_MARKER();
\r
1380 /*-----------------------------------------------------------*/
\r
1382 BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait )
\r
1384 BaseType_t xEntryTimeSet = pdFALSE;
\r
1385 TimeOut_t xTimeOut;
\r
1386 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1388 #if( configUSE_MUTEXES == 1 )
\r
1389 BaseType_t xInheritanceOccurred = pdFALSE;
\r
1392 /* Check the queue pointer is not NULL. */
\r
1393 configASSERT( ( pxQueue ) );
\r
1395 /* Check this really is a semaphore, in which case the item size will be
\r
1397 configASSERT( pxQueue->uxItemSize == 0 );
\r
1399 /* Cannot block if the scheduler is suspended. */
\r
1400 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
\r
1402 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
\r
1407 /* This function relaxes the coding standard somewhat to allow return
\r
1408 statements within the function itself. This is done in the interest
\r
1409 of execution time efficiency. */
\r
1413 taskENTER_CRITICAL();
\r
1415 /* Semaphores are queues with an item size of 0, and where the
\r
1416 number of messages in the queue is the semaphore's count value. */
\r
1417 const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;
\r
1419 /* Is there data in the queue now? To be running the calling task
\r
1420 must be the highest priority task wanting to access the queue. */
\r
1421 if( uxSemaphoreCount > ( UBaseType_t ) 0 )
\r
1423 traceQUEUE_RECEIVE( pxQueue );
\r
1425 /* Semaphores are queues with a data size of zero and where the
\r
1426 messages waiting is the semaphore's count. Reduce the count. */
\r
1427 pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1;
\r
1429 #if ( configUSE_MUTEXES == 1 )
\r
1431 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
1433 /* Record the information required to implement
\r
1434 priority inheritance should it become necessary. */
\r
1435 pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
\r
1439 mtCOVERAGE_TEST_MARKER();
\r
1442 #endif /* configUSE_MUTEXES */
\r
1444 /* Check to see if other tasks are blocked waiting to give the
\r
1445 semaphore, and if so, unblock the highest priority such task. */
\r
1446 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
1448 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
\r
1450 queueYIELD_IF_USING_PREEMPTION();
\r
1454 mtCOVERAGE_TEST_MARKER();
\r
1459 mtCOVERAGE_TEST_MARKER();
\r
1462 taskEXIT_CRITICAL();
\r
1467 if( xTicksToWait == ( TickType_t ) 0 )
\r
1469 /* For inheritance to have occurred there must have been an
\r
1470 initial timeout, and an adjusted timeout cannot become 0, as
\r
1471 if it were 0 the function would have exited. */
\r
1472 #if( configUSE_MUTEXES == 1 )
\r
1474 configASSERT( xInheritanceOccurred == pdFALSE );
\r
1476 #endif /* configUSE_MUTEXES */
\r
1478 /* The semaphore count was 0 and no block time is specified
\r
1479 (or the block time has expired) so exit now. */
\r
1480 taskEXIT_CRITICAL();
\r
1481 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
1482 return errQUEUE_EMPTY;
\r
1484 else if( xEntryTimeSet == pdFALSE )
\r
1486 /* The semaphore count was 0 and a block time was specified
\r
1487 so configure the timeout structure ready to block. */
\r
1488 vTaskInternalSetTimeOutState( &xTimeOut );
\r
1489 xEntryTimeSet = pdTRUE;
\r
1493 /* Entry time was already set. */
\r
1494 mtCOVERAGE_TEST_MARKER();
\r
1498 taskEXIT_CRITICAL();
\r
1500 /* Interrupts and other tasks can give to and take from the semaphore
\r
1501 now the critical section has been exited. */
\r
1503 vTaskSuspendAll();
\r
1504 prvLockQueue( pxQueue );
\r
1506 /* Update the timeout state to see if it has expired yet. */
\r
1507 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
1509 /* A block time is specified and not expired. If the semaphore
\r
1510 count is 0 then enter the Blocked state to wait for a semaphore to
\r
1511 become available. As semaphores are implemented with queues the
\r
1512 queue being empty is equivalent to the semaphore count being 0. */
\r
1513 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
\r
1515 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
\r
1517 #if ( configUSE_MUTEXES == 1 )
\r
1519 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
1521 taskENTER_CRITICAL();
\r
1523 xInheritanceOccurred = xTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
\r
1525 taskEXIT_CRITICAL();
\r
1529 mtCOVERAGE_TEST_MARKER();
\r
1534 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
\r
1535 prvUnlockQueue( pxQueue );
\r
1536 if( xTaskResumeAll() == pdFALSE )
\r
1538 portYIELD_WITHIN_API();
\r
1542 mtCOVERAGE_TEST_MARKER();
\r
1547 /* There was no timeout and the semaphore count was not 0, so
\r
1548 attempt to take the semaphore again. */
\r
1549 prvUnlockQueue( pxQueue );
\r
1550 ( void ) xTaskResumeAll();
\r
1556 prvUnlockQueue( pxQueue );
\r
1557 ( void ) xTaskResumeAll();
\r
1559 /* If the semaphore count is 0 exit now as the timeout has
\r
1560 expired. Otherwise return to attempt to take the semaphore that is
\r
1561 known to be available. As semaphores are implemented by queues the
\r
1562 queue being empty is equivalent to the semaphore count being 0. */
\r
1563 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
\r
1565 #if ( configUSE_MUTEXES == 1 )
\r
1567 /* xInheritanceOccurred could only have be set if
\r
1568 pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
\r
1569 test the mutex type again to check it is actually a mutex. */
\r
1570 if( xInheritanceOccurred != pdFALSE )
\r
1572 taskENTER_CRITICAL();
\r
1574 UBaseType_t uxHighestWaitingPriority;
\r
1576 /* This task blocking on the mutex caused another
\r
1577 task to inherit this task's priority. Now this task
\r
1578 has timed out the priority should be disinherited
\r
1579 again, but only as low as the next highest priority
\r
1580 task that is waiting for the same mutex. */
\r
1581 uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
\r
1582 vTaskPriorityDisinheritAfterTimeout( ( void * ) pxQueue->pxMutexHolder, uxHighestWaitingPriority );
\r
1584 taskEXIT_CRITICAL();
\r
1587 #endif /* configUSE_MUTEXES */
\r
1589 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
1590 return errQUEUE_EMPTY;
\r
1594 mtCOVERAGE_TEST_MARKER();
\r
1599 /*-----------------------------------------------------------*/
\r
1601 BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
\r
1603 BaseType_t xEntryTimeSet = pdFALSE;
\r
1604 TimeOut_t xTimeOut;
\r
1605 int8_t *pcOriginalReadPosition;
\r
1606 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1608 /* Check the pointer is not NULL. */
\r
1609 configASSERT( ( pxQueue ) );
\r
1611 /* The buffer into which data is received can only be NULL if the data size
\r
1612 is zero (so no data is copied into the buffer. */
\r
1613 configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
1615 /* Cannot block if the scheduler is suspended. */
\r
1616 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
\r
1618 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
\r
1623 /* This function relaxes the coding standard somewhat to allow return
\r
1624 statements within the function itself. This is done in the interest
\r
1625 of execution time efficiency. */
\r
1629 taskENTER_CRITICAL();
\r
1631 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
\r
1633 /* Is there data in the queue now? To be running the calling task
\r
1634 must be the highest priority task wanting to access the queue. */
\r
1635 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
1637 /* Remember the read position so it can be reset after the data
\r
1638 is read from the queue as this function is only peeking the
\r
1639 data, not removing it. */
\r
1640 pcOriginalReadPosition = pxQueue->u.pcReadFrom;
\r
1642 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1643 traceQUEUE_PEEK( pxQueue );
\r
1645 /* The data is not being removed, so reset the read pointer. */
\r
1646 pxQueue->u.pcReadFrom = pcOriginalReadPosition;
\r
1648 /* The data is being left in the queue, so see if there are
\r
1649 any other tasks waiting for the data. */
\r
1650 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1652 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1654 /* The task waiting has a higher priority than this task. */
\r
1655 queueYIELD_IF_USING_PREEMPTION();
\r
1659 mtCOVERAGE_TEST_MARKER();
\r
1664 mtCOVERAGE_TEST_MARKER();
\r
1667 taskEXIT_CRITICAL();
\r
1672 if( xTicksToWait == ( TickType_t ) 0 )
\r
1674 /* The queue was empty and no block time is specified (or
\r
1675 the block time has expired) so leave now. */
\r
1676 taskEXIT_CRITICAL();
\r
1677 traceQUEUE_PEEK_FAILED( pxQueue );
\r
1678 return errQUEUE_EMPTY;
\r
1680 else if( xEntryTimeSet == pdFALSE )
\r
1682 /* The queue was empty and a block time was specified so
\r
1683 configure the timeout structure ready to enter the blocked
\r
1685 vTaskInternalSetTimeOutState( &xTimeOut );
\r
1686 xEntryTimeSet = pdTRUE;
\r
1690 /* Entry time was already set. */
\r
1691 mtCOVERAGE_TEST_MARKER();
\r
1695 taskEXIT_CRITICAL();
\r
1697 /* Interrupts and other tasks can send to and receive from the queue
\r
1698 now the critical section has been exited. */
\r
1700 vTaskSuspendAll();
\r
1701 prvLockQueue( pxQueue );
\r
1703 /* Update the timeout state to see if it has expired yet. */
\r
1704 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
1706 /* Timeout has not expired yet, check to see if there is data in the
\r
1707 queue now, and if not enter the Blocked state to wait for data. */
\r
1708 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
\r
1710 traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
\r
1711 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
\r
1712 prvUnlockQueue( pxQueue );
\r
1713 if( xTaskResumeAll() == pdFALSE )
\r
1715 portYIELD_WITHIN_API();
\r
1719 mtCOVERAGE_TEST_MARKER();
\r
1724 /* There is data in the queue now, so don't enter the blocked
\r
1725 state, instead return to try and obtain the data. */
\r
1726 prvUnlockQueue( pxQueue );
\r
1727 ( void ) xTaskResumeAll();
\r
1732 /* The timeout has expired. If there is still no data in the queue
\r
1733 exit, otherwise go back and try to read the data again. */
\r
1734 prvUnlockQueue( pxQueue );
\r
1735 ( void ) xTaskResumeAll();
\r
1737 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
\r
1739 traceQUEUE_PEEK_FAILED( pxQueue );
\r
1740 return errQUEUE_EMPTY;
\r
1744 mtCOVERAGE_TEST_MARKER();
\r
1749 /*-----------------------------------------------------------*/
\r
1751 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
\r
1753 BaseType_t xReturn;
\r
1754 UBaseType_t uxSavedInterruptStatus;
\r
1755 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1757 configASSERT( pxQueue );
\r
1758 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
1760 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
1761 system call (or maximum API call) interrupt priority. Interrupts that are
\r
1762 above the maximum system call priority are kept permanently enabled, even
\r
1763 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
1764 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
1765 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1766 failure if a FreeRTOS API function is called from an interrupt that has been
\r
1767 assigned a priority above the configured maximum system call priority.
\r
1768 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
1769 that have been assigned a priority at or (logically) below the maximum
\r
1770 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
1771 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
1772 More information (albeit Cortex-M specific) is provided on the following
\r
1773 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1774 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1776 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1778 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
\r
1780 /* Cannot block in an ISR, so check there is data available. */
\r
1781 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
1783 const int8_t cRxLock = pxQueue->cRxLock;
\r
1785 traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
\r
1787 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1788 pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;
\r
1790 /* If the queue is locked the event list will not be modified.
\r
1791 Instead update the lock count so the task that unlocks the queue
\r
1792 will know that an ISR has removed data while the queue was
\r
1794 if( cRxLock == queueUNLOCKED )
\r
1796 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
1798 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
\r
1800 /* The task waiting has a higher priority than us so
\r
1801 force a context switch. */
\r
1802 if( pxHigherPriorityTaskWoken != NULL )
\r
1804 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1808 mtCOVERAGE_TEST_MARKER();
\r
1813 mtCOVERAGE_TEST_MARKER();
\r
1818 mtCOVERAGE_TEST_MARKER();
\r
1823 /* Increment the lock count so the task that unlocks the queue
\r
1824 knows that data was removed while it was locked. */
\r
1825 pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
\r
1833 traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
\r
1836 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1840 /*-----------------------------------------------------------*/
\r
1842 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
\r
1844 BaseType_t xReturn;
\r
1845 UBaseType_t uxSavedInterruptStatus;
\r
1846 int8_t *pcOriginalReadPosition;
\r
1847 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1849 configASSERT( pxQueue );
\r
1850 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
\r
1851 configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
\r
1853 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
1854 system call (or maximum API call) interrupt priority. Interrupts that are
\r
1855 above the maximum system call priority are kept permanently enabled, even
\r
1856 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
1857 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
1858 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1859 failure if a FreeRTOS API function is called from an interrupt that has been
\r
1860 assigned a priority above the configured maximum system call priority.
\r
1861 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
1862 that have been assigned a priority at or (logically) below the maximum
\r
1863 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
1864 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
1865 More information (albeit Cortex-M specific) is provided on the following
\r
1866 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1867 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1869 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1871 /* Cannot block in an ISR, so check there is data available. */
\r
1872 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
1874 traceQUEUE_PEEK_FROM_ISR( pxQueue );
\r
1876 /* Remember the read position so it can be reset as nothing is
\r
1877 actually being removed from the queue. */
\r
1878 pcOriginalReadPosition = pxQueue->u.pcReadFrom;
\r
1879 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1880 pxQueue->u.pcReadFrom = pcOriginalReadPosition;
\r
1887 traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
\r
1890 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1894 /*-----------------------------------------------------------*/
\r
1896 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
\r
1898 UBaseType_t uxReturn;
\r
1900 configASSERT( xQueue );
\r
1902 taskENTER_CRITICAL();
\r
1904 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
\r
1906 taskEXIT_CRITICAL();
\r
1909 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
\r
1910 /*-----------------------------------------------------------*/
\r
1912 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
\r
1914 UBaseType_t uxReturn;
\r
1917 pxQueue = ( Queue_t * ) xQueue;
\r
1918 configASSERT( pxQueue );
\r
1920 taskENTER_CRITICAL();
\r
1922 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
\r
1924 taskEXIT_CRITICAL();
\r
1927 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
\r
1928 /*-----------------------------------------------------------*/
\r
1930 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
\r
1932 UBaseType_t uxReturn;
\r
1934 configASSERT( xQueue );
\r
1936 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
\r
1939 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
\r
1940 /*-----------------------------------------------------------*/
\r
1942 void vQueueDelete( QueueHandle_t xQueue )
\r
1944 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
\r
1946 configASSERT( pxQueue );
\r
1947 traceQUEUE_DELETE( pxQueue );
\r
1949 #if ( configQUEUE_REGISTRY_SIZE > 0 )
\r
1951 vQueueUnregisterQueue( pxQueue );
\r
1955 #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
\r
1957 /* The queue can only have been allocated dynamically - free it
\r
1959 vPortFree( pxQueue );
\r
1961 #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
\r
1963 /* The queue could have been allocated statically or dynamically, so
\r
1964 check before attempting to free the memory. */
\r
1965 if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
\r
1967 vPortFree( pxQueue );
\r
1971 mtCOVERAGE_TEST_MARKER();
\r
1976 /* The queue must have been statically allocated, so is not going to be
\r
1977 deleted. Avoid compiler warnings about the unused parameter. */
\r
1980 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
\r
1982 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Return the queue number assigned for kernel-aware debugger/trace use. */
	UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
\r
1992 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Assign a queue number used only by kernel-aware debuggers/trace tools. */
	void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
	{
		( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
\r
2002 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Return the queue type (queue, mutex, semaphore, set, ...) for trace tools. */
	uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->ucQueueType;
	}

#endif /* configUSE_TRACE_FACILITY */
\r
2012 /*-----------------------------------------------------------*/
\r
#if( configUSE_MUTEXES == 1 )

	/* Determine the priority a mutex holder should disinherit down to after
	a waiting task times out - never lower than the priority of the highest
	priority task still waiting on the same mutex. */
	static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
	{
	UBaseType_t uxHighestPriorityOfWaitingTasks;

		/* If a task waiting for a mutex causes the mutex holder to inherit a
		priority, but the waiting task times out, then the holder should
		disinherit the priority - but only down to the highest priority of any
		other tasks that are waiting for the same mutex.  For this purpose,
		return the priority of the highest priority task that is waiting for the
		mutex. */
		if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0 )
		{
			/* Event list items are ordered on configMAX_PRIORITIES minus the
			task priority, so invert the head entry's value to recover the
			priority. */
			uxHighestPriorityOfWaitingTasks = configMAX_PRIORITIES - listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) );
		}
		else
		{
			uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
		}

		return uxHighestPriorityOfWaitingTasks;
	}

#endif /* configUSE_MUTEXES */
\r
2039 /*-----------------------------------------------------------*/
\r
2041 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
\r
2043 BaseType_t xReturn = pdFALSE;
\r
2044 UBaseType_t uxMessagesWaiting;
\r
2046 /* This function is called from a critical section. */
\r
2048 uxMessagesWaiting = pxQueue->uxMessagesWaiting;
\r
2050 if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
\r
2052 #if ( configUSE_MUTEXES == 1 )
\r
2054 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
2056 /* The mutex is no longer being held. */
\r
2057 xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
\r
2058 pxQueue->pxMutexHolder = NULL;
\r
2062 mtCOVERAGE_TEST_MARKER();
\r
2065 #endif /* configUSE_MUTEXES */
\r
2067 else if( xPosition == queueSEND_TO_BACK )
\r
2069 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
\r
2070 pxQueue->pcWriteTo += pxQueue->uxItemSize;
\r
2071 if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
\r
2073 pxQueue->pcWriteTo = pxQueue->pcHead;
\r
2077 mtCOVERAGE_TEST_MARKER();
\r
2082 ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
2083 pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
\r
2084 if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
\r
2086 pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
\r
2090 mtCOVERAGE_TEST_MARKER();
\r
2093 if( xPosition == queueOVERWRITE )
\r
2095 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
\r
2097 /* An item is not being added but overwritten, so subtract
\r
2098 one from the recorded number of items in the queue so when
\r
2099 one is added again below the number of recorded items remains
\r
2101 --uxMessagesWaiting;
\r
2105 mtCOVERAGE_TEST_MARKER();
\r
2110 mtCOVERAGE_TEST_MARKER();
\r
2114 pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;
\r
2118 /*-----------------------------------------------------------*/
\r
2120 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
\r
2122 if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
\r
2124 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
\r
2125 if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
\r
2127 pxQueue->u.pcReadFrom = pxQueue->pcHead;
\r
2131 mtCOVERAGE_TEST_MARKER();
\r
2133 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
\r
2136 /*-----------------------------------------------------------*/
\r
2138 static void prvUnlockQueue( Queue_t * const pxQueue )
\r
2140 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
\r
2142 /* The lock counts contains the number of extra data items placed or
\r
2143 removed from the queue while the queue was locked. When a queue is
\r
2144 locked items can be added or removed, but the event lists cannot be
\r
2146 taskENTER_CRITICAL();
\r
2148 int8_t cTxLock = pxQueue->cTxLock;
\r
2150 /* See if data was added to the queue while it was locked. */
\r
2151 while( cTxLock > queueLOCKED_UNMODIFIED )
\r
2153 /* Data was posted while the queue was locked. Are any tasks
\r
2154 blocked waiting for data to become available? */
\r
2155 #if ( configUSE_QUEUE_SETS == 1 )
\r
2157 if( pxQueue->pxQueueSetContainer != NULL )
\r
2159 if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
\r
2161 /* The queue is a member of a queue set, and posting to
\r
2162 the queue set caused a higher priority task to unblock.
\r
2163 A context switch is required. */
\r
2164 vTaskMissedYield();
\r
2168 mtCOVERAGE_TEST_MARKER();
\r
2173 /* Tasks that are removed from the event list will get
\r
2174 added to the pending ready list as the scheduler is still
\r
2176 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
2178 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
2180 /* The task waiting has a higher priority so record that a
\r
2181 context switch is required. */
\r
2182 vTaskMissedYield();
\r
2186 mtCOVERAGE_TEST_MARKER();
\r
2195 #else /* configUSE_QUEUE_SETS */
\r
2197 /* Tasks that are removed from the event list will get added to
\r
2198 the pending ready list as the scheduler is still suspended. */
\r
2199 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
2201 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
2203 /* The task waiting has a higher priority so record that
\r
2204 a context switch is required. */
\r
2205 vTaskMissedYield();
\r
2209 mtCOVERAGE_TEST_MARKER();
\r
2217 #endif /* configUSE_QUEUE_SETS */
\r
2222 pxQueue->cTxLock = queueUNLOCKED;
\r
2224 taskEXIT_CRITICAL();
\r
2226 /* Do the same for the Rx lock. */
\r
2227 taskENTER_CRITICAL();
\r
2229 int8_t cRxLock = pxQueue->cRxLock;
\r
2231 while( cRxLock > queueLOCKED_UNMODIFIED )
\r
2233 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
2235 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
\r
2237 vTaskMissedYield();
\r
2241 mtCOVERAGE_TEST_MARKER();
\r
2252 pxQueue->cRxLock = queueUNLOCKED;
\r
2254 taskEXIT_CRITICAL();
\r
2256 /*-----------------------------------------------------------*/
\r
2258 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
\r
2260 BaseType_t xReturn;
\r
2262 taskENTER_CRITICAL();
\r
2264 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
\r
2270 xReturn = pdFALSE;
\r
2273 taskEXIT_CRITICAL();
\r
2277 /*-----------------------------------------------------------*/
\r
2279 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
\r
2281 BaseType_t xReturn;
\r
2283 configASSERT( xQueue );
\r
2284 if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
\r
2290 xReturn = pdFALSE;
\r
2294 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
\r
2295 /*-----------------------------------------------------------*/
\r
2297 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
\r
2299 BaseType_t xReturn;
\r
2301 taskENTER_CRITICAL();
\r
2303 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
\r
2309 xReturn = pdFALSE;
\r
2312 taskEXIT_CRITICAL();
\r
2316 /*-----------------------------------------------------------*/
\r
2318 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
\r
2320 BaseType_t xReturn;
\r
2322 configASSERT( xQueue );
\r
2323 if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
\r
2329 xReturn = pdFALSE;
\r
2333 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
\r
2334 /*-----------------------------------------------------------*/
\r
#if ( configUSE_CO_ROUTINES == 1 )

	/* Co-routine version of a queue send.  Cannot block directly; instead it
	returns errQUEUE_BLOCKED so the co-routine macro implementation can yield,
	or errQUEUE_FULL / pdPASS / errQUEUE_YIELD as appropriate. */
	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a coroutine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
\r
2411 /*-----------------------------------------------------------*/
\r
#if ( configUSE_CO_ROUTINES == 1 )

	/* Co-routine version of a queue receive.  Cannot block directly; returns
	errQUEUE_BLOCKED so the co-routine macro implementation can yield, or
	errQUEUE_FULL / pdPASS / pdFAIL / errQUEUE_YIELD as appropriate. */
	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
\r
2501 /*-----------------------------------------------------------*/
\r
#if ( configUSE_CO_ROUTINES == 1 )

	/* ISR-safe co-routine send.  Returns pdTRUE if this call (or a previous
	call within the same ISR) woke a co-routine, so only one co-routine is
	woken per interrupt. */
	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
\r
2549 /*-----------------------------------------------------------*/
\r
#if ( configUSE_CO_ROUTINES == 1 )

	/* ISR-safe co-routine receive.  Returns pdPASS when an item was copied
	out, pdFAIL when the queue was empty.  *pxCoRoutineWoken is set to pdTRUE
	if removing the item woke a co-routine waiting for space. */
	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available.  If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
\r
2609 /*-----------------------------------------------------------*/
\r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* Add a queue handle/name pair to the registry used by kernel-aware
	debuggers.  Silently does nothing if the registry is full. */
	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
\r
2638 /*-----------------------------------------------------------*/
\r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* Look up the registry name associated with a queue handle.  Returns
	NULL if the handle is not registered. */
	const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;
	const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

		/* Note there is nothing here to protect against another task adding or
		removing entries from the registry while it is being searched. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				pcReturn = xQueueRegistry[ ux ].pcQueueName;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

		return pcReturn;
	} /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
\r
2666 /*-----------------------------------------------------------*/
\r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* Remove a queue handle from the registry, freeing its slot for reuse. */
	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered in actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot if free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;

				/* Set the handle to NULL to ensure the same queue handle cannot
				appear in the registry twice if it is added, removed, then
				added again. */
				xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
\r
2698 /*-----------------------------------------------------------*/
\r
#if ( configUSE_TIMERS == 1 )

	/* Kernel-internal helper used by the timer task: place the calling task
	on the queue's receive event list without actually blocking yet. */
	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
\r
2734 /*-----------------------------------------------------------*/
\r
#if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

	/* Create a queue set: a queue whose items are handles of the member
	queues/semaphores that contain data.  Returns NULL on allocation failure. */
	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
\r
2748 /*-----------------------------------------------------------*/
\r
#if ( configUSE_QUEUE_SETS == 1 )

	/* Add a queue or semaphore to a queue set.  Fails (pdFAIL) if the member
	is already in a set or is not empty; returns pdPASS on success. */
	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xReturn = pdFAIL;
			}
			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xReturn = pdFAIL;
			}
			else
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
\r
2781 /*-----------------------------------------------------------*/
\r
#if ( configUSE_QUEUE_SETS == 1 )

	/* Remove a queue or semaphore from a queue set.  Fails (pdFAIL) if the
	member is not in this set or still contains items; pdPASS on success. */
	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
\r
2817 /*-----------------------------------------------------------*/
\r
#if ( configUSE_QUEUE_SETS == 1 )

	/* Block (up to xTicksToWait) on the queue set and return the handle of a
	member that contains data, or NULL on timeout. */
	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
\r
2830 /*-----------------------------------------------------------*/
\r
#if ( configUSE_QUEUE_SETS == 1 )

	/* ISR-safe, non-blocking version of xQueueSelectFromSet().  Returns NULL
	if no member of the set contains data. */
	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
\r
2843 /*-----------------------------------------------------------*/
\r
#if ( configUSE_QUEUE_SETS == 1 )

	/* Post the handle of a member queue that just received data into its
	containing queue set.  Returns pdTRUE if doing so unblocked a task of
	higher priority than the running task. */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called form a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			const int8_t cTxLock = pxQueueSetContainer->cTxLock;

			traceQUEUE_SEND( pxQueueSetContainer );

			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( cTxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority. */
						xReturn = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* The set is locked - just record that data was posted so the
				event lists can be updated when the set is unlocked. */
				pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
\r