/*
    FreeRTOS V7.6.0 - Copyright (C) 2013 Real Time Engineers Ltd.

    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

    ***************************************************************************
     *                                                                       *
     *    FreeRTOS provides completely free yet professionally developed,    *
     *    robust, strictly quality controlled, supported, and cross          *
     *    platform software that has become a de facto standard.             *
     *                                                                       *
     *    Help yourself get started quickly and support the FreeRTOS         *
     *    project by purchasing a FreeRTOS tutorial book, reference          *
     *    manual, or both from: http://www.FreeRTOS.org/Documentation        *
     *                                                                       *
    ***************************************************************************

    This file is part of the FreeRTOS distribution.

    FreeRTOS is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License (version 2) as published by the
    Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.

    >>! NOTE: The modification to the GPL is included to allow you to distribute
    >>! a combined work that includes FreeRTOS without being obliged to provide
    >>! the source code for proprietary components outside of the FreeRTOS
    >>! kernel.

    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE.  Full license text is available from the following
    link: http://www.freertos.org/a00114.html

    ***************************************************************************
     *                                                                       *
     *    Having a problem?  Start by reading the FAQ "My application does   *
     *    not run, what could be wrong?"                                     *
     *                                                                       *
     *    http://www.FreeRTOS.org/FAQHelp.html                               *
     *                                                                       *
    ***************************************************************************

    http://www.FreeRTOS.org - Documentation, books, training, latest versions,
    license and Real Time Engineers Ltd. contact details.

    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
    compatible FAT file system, and our tiny thread aware UDP/IP stack.

    http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High
    Integrity Systems to sell under the OpenRTOS brand.  Low cost OpenRTOS
    licenses offer ticketed support, indemnification and middleware.

    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
    engineered and independently SIL3 certified version for use in safety and
    mission critical applications that require provable dependability.
*/
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
	#include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED					( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.
 */
typedef struct QueueDefinition
{
	int8_t *pcHead;					/*< Points to the beginning of the queue storage area. */
	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items, and this byte is used as a marker. */
	int8_t *pcWriteTo;				/*< Points to the next free place in the storage area. */

	union							/* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
	{
		int8_t *pcReadFrom;			/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
		UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
	} u;

	List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
	List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

	volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
	UBaseType_t uxItemSize;			/*< The size of each item that the queue will hold. */

	volatile BaseType_t xRxLock;	/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
	volatile BaseType_t xTxLock;	/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

	#if ( configUSE_TRACE_FACILITY == 1 )
		UBaseType_t uxQueueNumber;
		uint8_t ucQueueType;
	#endif

	#if ( configUSE_QUEUE_SETS == 1 )
		struct QueueDefinition *pxQueueSetContainer;
	#endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
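
/* Usage sketch (illustrative only, not part of the kernel): application code
can name a queue so it shows up in a kernel aware debugger.  The handle name
and queue dimensions below are arbitrary examples; vQueueAddToRegistry() only
has an effect when configQUEUE_REGISTRY_SIZE is greater than 0. */
#if 0
	static void vExampleRegisterQueue( void )
	{
	QueueHandle_t xEventQueue;

		xEventQueue = xQueueCreate( 8, sizeof( uint32_t ) );
		if( xEventQueue != NULL )
		{
			/* The name is only used by debug tools; pick anything readable. */
			vQueueAddToRegistry( xEventQueue, "EventQ" );
		}
	}
#endif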

/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE;
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
	/*
	 * Checks to see if a queue is a member of a queue set, and if so, notifies
	 * the queue set that the queue contains data.
	 */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL();									\
	{														\
		if( ( pxQueue )->xRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->xTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
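
/* Sketch of the locking protocol, for reference only; this fragment mirrors
the blocking paths in xQueueGenericSend() and xQueueGenericReceive() below.
The scheduler is suspended first so no other task can run, the queue is then
locked so ISRs defer their event list updates into the lock counts, and
prvUnlockQueue() later replays anything an ISR did in the meantime. */
#if 0
	vTaskSuspendAll();
	prvLockQueue( pxQueue );

	/* ...inspect the queue and decide whether to block... */

	prvUnlockQueue( pxQueue );
	( void ) xTaskResumeAll();
#endif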

BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
		pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
		pxQueue->pcWriteTo = pxQueue->pcHead;
		pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
		pxQueue->xRxLock = queueUNLOCKED;
		pxQueue->xTxLock = queueUNLOCKED;

		if( xNewQueue == pdFALSE )
		{
			/* If there are tasks blocked waiting to read from the queue, then
			the tasks will remain blocked as after this function exits the queue
			will still be empty.  If there are tasks blocked waiting to write to
			the queue, then one should be unblocked as after this function exits
			it will be possible to write to it. */
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
				{
					queueYIELD_IF_USING_PREEMPTION();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			/* Ensure the event queues start in the correct state. */
			vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
		}
	}
	taskEXIT_CRITICAL();

	/* A value is returned for calling semantic consistency with previous
	versions. */
	return pdPASS;
}
/*-----------------------------------------------------------*/

QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
QueueHandle_t xReturn = NULL;

	/* Remove compiler warnings about unused parameters should
	configUSE_TRACE_FACILITY not be set to 1. */
	( void ) ucQueueType;

	/* Allocate the new queue structure. */
	if( uxQueueLength > ( UBaseType_t ) 0 )
	{
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
		if( pxNewQueue != NULL )
		{
			/* Create the list of pointers to queue items.  The queue is one byte
			longer than asked for to make wrap checking easier/faster. */
			xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

			pxNewQueue->pcHead = ( int8_t * ) pvPortMalloc( xQueueSizeInBytes );
			if( pxNewQueue->pcHead != NULL )
			{
				/* Initialise the queue members as described above where the
				queue type is defined. */
				pxNewQueue->uxLength = uxQueueLength;
				pxNewQueue->uxItemSize = uxItemSize;
				( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

				#if ( configUSE_TRACE_FACILITY == 1 )
				{
					pxNewQueue->ucQueueType = ucQueueType;
				}
				#endif /* configUSE_TRACE_FACILITY */

				#if( configUSE_QUEUE_SETS == 1 )
				{
					pxNewQueue->pxQueueSetContainer = NULL;
				}
				#endif /* configUSE_QUEUE_SETS */

				traceQUEUE_CREATE( pxNewQueue );
				xReturn = pxNewQueue;
			}
			else
			{
				traceQUEUE_CREATE_FAILED( ucQueueType );
				vPortFree( pxNewQueue );
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}

	configASSERT( xReturn );

	return xReturn;
}
/*-----------------------------------------------------------*/
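
/* Usage sketch (illustrative only, not part of the kernel): typical
application-level creation of a queue via the xQueueCreate() macro from
queue.h, which resolves to xQueueGenericCreate() above.  The element type
and depth are arbitrary example values. */
#if 0
	static void vExampleCreateQueue( void )
	{
	typedef struct { uint8_t ucMessageID; uint32_t ulData; } Message_t;
	QueueHandle_t xMessageQueue;

		/* Space for 10 Message_t items is allocated from the FreeRTOS heap. */
		xMessageQueue = xQueueCreate( 10, sizeof( Message_t ) );
		configASSERT( xMessageQueue != NULL );
	}
#endif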

#if ( configUSE_MUTEXES == 1 )

	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		/* Allocate the new queue structure. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
		if( pxNewQueue != NULL )
		{
			/* Information required for priority inheritance. */
			pxNewQueue->pxMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* When a queue is used as a mutex no data is actually copied into
			or out of the queue. */
			pxNewQueue->pcWriteTo = NULL;
			pxNewQueue->u.pcReadFrom = NULL;

			/* Each mutex has a length of 1 (like a binary semaphore) and
			an item size of 0 as nothing is actually copied into or out
			of the mutex. */
			pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
			pxNewQueue->uxLength = ( UBaseType_t ) 1U;
			pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
			pxNewQueue->xRxLock = queueUNLOCKED;
			pxNewQueue->xTxLock = queueUNLOCKED;

			#if ( configUSE_TRACE_FACILITY == 1 )
			{
				pxNewQueue->ucQueueType = ucQueueType;
			}
			#endif

			#if ( configUSE_QUEUE_SETS == 1 )
			{
				pxNewQueue->pxQueueSetContainer = NULL;
			}
			#endif

			/* Ensure the event queues start with the correct state. */
			vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state. */
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			traceCREATE_MUTEX_FAILED();
		}

		configASSERT( pxNewQueue );
		return pxNewQueue;
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
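
/* Usage sketch (illustrative only, not part of the kernel): application code
normally creates and uses a mutex through the semphr.h wrappers, which call
xQueueCreateMutex() above.  The handle typedef name (SemaphoreHandle_t here)
and the 10 tick block time are assumptions for the example; older headers
spell the handle type xSemaphoreHandle. */
#if 0
	static void vExampleUseMutex( void )
	{
	SemaphoreHandle_t xMutex;

		xMutex = xSemaphoreCreateMutex();

		if( xSemaphoreTake( xMutex, ( TickType_t ) 10 ) == pdTRUE )
		{
			/* ...access the resource protected by the mutex... */
			xSemaphoreGive( xMutex );
		}
	}
#endif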

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
	{
	void *pxReturn;

		/* This function is called by xSemaphoreGetMutexHolder(), and should not
		be called directly.  Note:  This is a good way of determining if the
		calling task is the mutex holder, but not a good way of determining the
		identity of the mutex holder, as the holder may change between the
		following critical section exiting and the function returning. */
		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
			}
			else
			{
				pxReturn = NULL;
			}
		}
		taskEXIT_CRITICAL();

		return pxReturn;
	}

#endif
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then pxMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		pxMutexHolder can never coincidentally equal the task's handle, and as
		this is the only condition we are interested in it does not matter if
		pxMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the pxMutexHolder variable. */
		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.uxRecursiveCallCount )--;

			/* Have we unwound the call count? */
			if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			/* We cannot give the mutex because we are not the holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xBlockTime )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* Comments regarding mutual exclusion as per those within
		xQueueGiveMutexRecursive(). */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
		{
			( pxMutex->u.uxRecursiveCallCount )++;
			xReturn = pdPASS;
		}
		else
		{
			xReturn = xQueueGenericReceive( pxMutex, NULL, xBlockTime, pdFALSE );

			/* pdPASS will only be returned if the mutex was successfully
			obtained; the calling task may have blocked to reach here. */
			if( xReturn == pdPASS )
			{
				( pxMutex->u.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
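
/* Usage sketch (illustrative only, not part of the kernel): recursive
take/give through the semphr.h wrappers.  Each successful
xSemaphoreTakeRecursive() must be balanced by one xSemaphoreGiveRecursive();
the mutex is only returned when uxRecursiveCallCount unwinds to zero, as
implemented above.  Tick values are arbitrary examples. */
#if 0
	static void vExampleRecursiveMutex( void )
	{
	SemaphoreHandle_t xRecursiveMutex;

		xRecursiveMutex = xSemaphoreCreateRecursiveMutex();

		if( xSemaphoreTakeRecursive( xRecursiveMutex, ( TickType_t ) 10 ) == pdPASS )
		{
			/* Taking again from the same task just bumps the call count. */
			( void ) xSemaphoreTakeRecursive( xRecursiveMutex, ( TickType_t ) 0 );

			xSemaphoreGiveRecursive( xRecursiveMutex );
			xSemaphoreGiveRecursive( xRecursiveMutex ); /* Now actually released. */
		}
	}
#endif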

#if ( configUSE_COUNTING_SEMAPHORES == 1 )

	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		configASSERT( xHandle );
		return xHandle;
	}

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/
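
/* Usage sketch (illustrative only, not part of the kernel): a counting
semaphore used to track available buffers.  uxMaxCount bounds
uxMessagesWaiting, and the initial count is written directly by the function
above.  The counts below are arbitrary example values. */
#if 0
	static void vExampleCountingSemaphore( void )
	{
	SemaphoreHandle_t xBufferTokens;

		/* Up to 5 "tokens", all available initially. */
		xBufferTokens = xSemaphoreCreateCounting( 5, 5 );

		if( xSemaphoreTake( xBufferTokens, portMAX_DELAY ) == pdTRUE )
		{
			/* ...one buffer claimed; release the token when done... */
			xSemaphoreGive( xBufferTokens );
		}
	}
#endif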

BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there room on the queue now?  The running task must be
			the highest priority task wanting to access the queue.  If
			the head item in the queue is to be overwritten then it does
			not matter if the queue is full. */
			if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
			{
				traceQUEUE_SEND( pxQueue );
				prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock.  A context switch is required. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* If there was a task waiting for data to arrive on the
						queue then unblock it now. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
							{
								/* The unblocked task has a priority higher than
								our own so yield immediately.  Yes it is ok to
								do this from within the critical section - the
								kernel takes care of that. */
								queueYIELD_IF_USING_PREEMPTION();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately.  Yes it is ok to do
							this from within the critical section - the kernel
							takes care of that. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */

				taskEXIT_CRITICAL();

				/* Return to the original privilege level before exiting the
				function. */
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was full and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();

					/* Return to the original privilege level before exiting
					the function. */
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was full and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

				/* Unlocking the queue means queue events can affect the
				event list.  It is possible that interrupts occurring now
				remove this task from the event list again - but as the
				scheduler is suspended the task will go onto the pending
				ready list instead of the actual ready list. */
				prvUnlockQueue( pxQueue );

				/* Resuming the scheduler will move tasks from the pending
				ready list into the ready list - so it is feasible that this
				task is already in a ready list before it yields - in which
				case the yield will not cause a context switch unless there
				is also a higher priority task in the pending ready list. */
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
			}
			else
			{
				/* The queue is no longer full, so try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The timeout has expired. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			/* Return to the original privilege level before exiting the
			function. */
			traceQUEUE_SEND_FAILED( pxQueue );
			return errQUEUE_FULL;
		}
	}
}
/*-----------------------------------------------------------*/
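
/* Usage sketch (illustrative only, not part of the kernel): sending to a
queue from a task through the xQueueSend() macro, which calls
xQueueGenericSend() above with queueSEND_TO_BACK.  Assumes an xMessageQueue
handle created as in the earlier creation sketch; the 10 tick block time is
an arbitrary example. */
#if 0
	static void vExampleSend( QueueHandle_t xMessageQueue )
	{
	uint32_t ulValueToSend = 123;

		if( xQueueSend( xMessageQueue, &ulValueToSend, ( TickType_t ) 10 ) != pdPASS )
		{
			/* errQUEUE_FULL: the queue stayed full for the whole block time. */
		}
	}
#endif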

#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				/* Is there room on the queue now?  To be running we must be
				the highest priority task wanting to access the queue. */
				if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
				{
					traceQUEUE_SEND( pxQueue );
					prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately. */
							portYIELD_WITHIN_API();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						taskEXIT_CRITICAL();
						return errQUEUE_FULL;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueFull( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_SEND( pxQueue );
						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					taskEXIT_CRITICAL();
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
			}
			taskEXIT_CRITICAL();
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	int8_t *pcOriginalReadPosition;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
				{
					/* Remember our read position in case we are just peeking. */
					pcOriginalReadPosition = pxQueue->u.pcReadFrom;

					prvCopyDataFromQueue( pxQueue, pvBuffer );

					if( xJustPeeking == pdFALSE )
					{
						traceQUEUE_RECEIVE( pxQueue );

						/* Data is actually being removed (not just peeked). */
						--( pxQueue->uxMessagesWaiting );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Record the information required to implement
								priority inheritance should it become necessary. */
								pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
							{
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
					}
					else
					{
						traceQUEUE_PEEK( pxQueue );

						/* We are not removing the data, so reset our read
						pointer. */
						pxQueue->u.pcReadFrom = pcOriginalReadPosition;

						/* The data is being left in the queue, so see if there are
						any other tasks waiting for the data. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							/* Tasks that are removed from the event list will get added to
							the pending ready list as the scheduler is still suspended. */
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority than this task. */
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						taskEXIT_CRITICAL();
						traceQUEUE_RECEIVE_FAILED( pxQueue );
						return errQUEUE_EMPTY;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								taskENTER_CRITICAL();
								{
									vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
								}
								taskEXIT_CRITICAL();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
			}
			taskEXIT_CRITICAL();
		}
	}

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	/* Similar to xQueueGenericSend, except we don't block if there is no room
	in the queue.  Also we don't directly wake a task that was blocked on a
	queue read, instead we return a flag to say whether a context switch is
	required or not (i.e. has a task with a higher priority than us been woken
	by this post). */
	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
		{
			traceQUEUE_SEND_FROM_ISR( pxQueue );

			prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

			/* If the queue is locked we do not alter the event list.  This will
			be done when the queue is unlocked later. */
			if( pxQueue->xTxLock == queueUNLOCKED )
			{
				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock.  A context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority so record that a
								context switch is required. */
								if( pxHigherPriorityTaskWoken != NULL )
								{
									*pxHigherPriorityTaskWoken = pdTRUE;
								}
								else
								{
									mtCOVERAGE_TEST_MARKER();
								}
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was posted while it was locked. */
				++( pxQueue->xTxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
			xReturn = errQUEUE_FULL;
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/
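
/* Usage sketch (illustrative only, not part of the kernel): posting from an
interrupt handler and requesting a context switch on exit.  The ISR name and
ulReadDataFromPeripheral() are hypothetical; the yield macro is port specific
(portYIELD_FROM_ISR() or portEND_SWITCHING_ISR() depending on the port). */
#if 0
	void vExampleISR( void )
	{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
	uint32_t ulReceived;

		ulReceived = ulReadDataFromPeripheral(); /* Hypothetical helper. */

		( void ) xQueueSendFromISR( xMessageQueue, &ulReceived, &xHigherPriorityTaskWoken );

		/* If a higher priority task was unblocked, switch to it on exit. */
		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}
#endif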

BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there data in the queue now?  To be running we must be
			the highest priority task wanting to access the queue. */
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Remember the read position in case the queue is only being
				peeked. */
				pcOriginalReadPosition = pxQueue->u.pcReadFrom;

				prvCopyDataFromQueue( pxQueue, pvBuffer );

				if( xJustPeeking == pdFALSE )
				{
					traceQUEUE_RECEIVE( pxQueue );

					/* Actually removing data, not just peeking. */
					--( pxQueue->uxMessagesWaiting );

					#if ( configUSE_MUTEXES == 1 )
					{
						if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
						{
							/* Record the information required to implement
							priority inheritance should it become necessary. */
							pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					#endif

					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
						{
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					traceQUEUE_PEEK( pxQueue );

					/* The data is not being removed, so reset the read
					pointer. */
					pxQueue->u.pcReadFrom = pcOriginalReadPosition;

					/* The data is being left in the queue, so see if there are
					any other tasks waiting for the data. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						/* Tasks that are removed from the event list will get added to
						the pending ready list as the scheduler is still suspended. */
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority than this task. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}

				taskEXIT_CRITICAL();

				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was empty and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was empty and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

				#if ( configUSE_MUTEXES == 1 )
				{
					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
					{
						taskENTER_CRITICAL();
						{
							vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
						}
						taskEXIT_CRITICAL();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif

				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
				prvUnlockQueue( pxQueue );
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* The queue contains data again.  Loop back to try and read it. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The timeout has expired. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();
			traceQUEUE_RECEIVE_FAILED( pxQueue );
			return errQUEUE_EMPTY;
		}
	}
}
/*-----------------------------------------------------------*/
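
/* Usage sketch (illustrative only, not part of the kernel): receiving with a
timeout via the xQueueReceive() macro (which calls xQueueGenericReceive()
above with xJustPeeking set to pdFALSE), or inspecting the head item without
removing it via xQueuePeek().  Assumes an xMessageQueue handle as in the
earlier sketches; tick values are arbitrary examples. */
#if 0
	static void vExampleReceive( QueueHandle_t xMessageQueue )
	{
	uint32_t ulReceivedValue;

		if( xQueueReceive( xMessageQueue, &ulReceivedValue, ( TickType_t ) 100 ) == pdPASS )
		{
			/* ulReceivedValue holds a copy of the item, which has been removed. */
		}

		if( xQueuePeek( xMessageQueue, &ulReceivedValue, ( TickType_t ) 0 ) == pdPASS )
		{
			/* Copy obtained, but the item is still at the head of the queue. */
		}
	}
#endif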

BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

			prvCopyDataFromQueue( pxQueue, pvBuffer );
			--( pxQueue->uxMessagesWaiting );

			/* If the queue is locked the event list will not be modified.
			Instead update the lock count so the task that unlocks the queue
			will know that an ISR has removed data while the queue was
			locked. */
			if( pxQueue->xRxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority than us so
						force a context switch. */
						if( pxHigherPriorityTaskWoken != NULL )
						{
							*pxHigherPriorityTaskWoken = pdTRUE;
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was removed while it was locked. */
				++( pxQueue->xRxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/
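
/* Usage sketch (illustrative only, not part of the kernel): draining a queue
from a hypothetical interrupt handler.  xQueueReceiveFromISR() never blocks,
so it is simply polled until it reports the queue empty; the yield macro is
port specific. */
#if 0
	void vExampleDrainISR( void )
	{
	BaseType_t xTaskWoken = pdFALSE;
	uint32_t ulItem;

		while( xQueueReceiveFromISR( xMessageQueue, &ulItem, &xTaskWoken ) == pdPASS )
		{
			/* ...process ulItem... */
		}

		portYIELD_FROM_ISR( xTaskWoken );
	}
#endif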

BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_PEEK_FROM_ISR( pxQueue );

			/* Remember the read position so it can be reset as nothing is
			actually being removed from the queue. */
			pcOriginalReadPosition = pxQueue->u.pcReadFrom;
			prvCopyDataFromQueue( pxQueue, pvBuffer );
			pxQueue->u.pcReadFrom = pcOriginalReadPosition;

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

	configASSERT( xQueue );

	taskENTER_CRITICAL();
	{
		uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
	}
	taskEXIT_CRITICAL();

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t *pxQueue;

	pxQueue = ( Queue_t * ) xQueue;
	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
	}
	taskEXIT_CRITICAL();

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

	configASSERT( xQueue );

	uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	traceQUEUE_DELETE( pxQueue );
	#if ( configQUEUE_REGISTRY_SIZE > 0 )
	{
		vQueueUnregisterQueue( pxQueue );
	}
	#endif

	if( pxQueue->pcHead != NULL )
	{
		vPortFree( pxQueue->pcHead );
	}
	vPortFree( pxQueue );
}
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
	{
		( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->ucQueueType;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				vTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
				pxQueue->pxMutexHolder = NULL;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize;
		if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		if( xPosition == queueOVERWRITE )
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--( pxQueue->uxMessagesWaiting );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

	++( pxQueue->uxMessagesWaiting );
}
/*-----------------------------------------------------------*/
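
/* Usage sketch (illustrative only, not part of the kernel): the
queueOVERWRITE path above backs the xQueueOverwrite() macro, intended for
single element queues used as "mailboxes" that always hold the latest value.
It succeeds even when the queue is full. */
#if 0
	static void vExampleMailbox( void )
	{
	QueueHandle_t xMailbox;
	uint32_t ulLatestReading = 42;

		xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );

		/* Always succeeds on a length 1 queue; any previous value is replaced. */
		( void ) xQueueOverwrite( xMailbox, &ulLatestReading );
	}
#endif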

static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
	if( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX )
	{
		pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}
}
/*-----------------------------------------------------------*/

static void prvUnlockQueue( Queue_t * const pxQueue )
{
	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

	/* The lock counts contain the number of extra data items placed or
	removed from the queue while the queue was locked.  When a queue is
	locked items can be added or removed, but the event lists cannot be
	updated. */
	taskENTER_CRITICAL();
	{
		/* See if data was added to the queue while it was locked. */
		while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
		{
			/* Data was posted while the queue was locked.  Are any tasks
			blocked waiting for data to become available? */
			#if ( configUSE_QUEUE_SETS == 1 )
			{
				if( pxQueue->pxQueueSetContainer != NULL )
				{
					if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
					{
						/* The queue is a member of a queue set, and posting to
						the queue set caused a higher priority task to unblock.
						A context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* Tasks that are removed from the event list will get added to
					the pending ready list as the scheduler is still suspended. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							vTaskMissedYield();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						break;
					}
				}
			}
			#else /* configUSE_QUEUE_SETS */
			{
				/* Tasks that are removed from the event list will get added to
				the pending ready list as the scheduler is still suspended. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority so record that a
						context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					break;
				}
			}
			#endif /* configUSE_QUEUE_SETS */

			--( pxQueue->xTxLock );
		}

		pxQueue->xTxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();

	/* Do the same for the Rx lock. */
	taskENTER_CRITICAL();
	{
		while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
		{
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					vTaskMissedYield();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				--( pxQueue->xRxLock );
			}
			else
			{
				break;
			}
		}

		pxQueue->xRxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a co-routine we cannot block directly,
					but return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
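/* Usage sketch (editor's illustration, not part of the kernel source):
application code does not call xQueueCRSend() directly - the crQUEUE_SEND()
macro in croutine.h wraps it and handles the errQUEUE_BLOCKED return by
yielding the co-routine.  xCoQueue is an assumed, already-created handle.
Note that co-routine stack variables do not survive a block, hence the
statics. */
#if 0
	static void vExampleTxCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
	{
	static BaseType_t xResult;
	static const char cValueToPost = 'x';

		/* Co-routines must start with a call to crSTART(). */
		crSTART( xHandle );

		for( ;; )
		{
			/* Block for up to 10 ticks if xCoQueue is full. */
			crQUEUE_SEND( xHandle, xCoQueue, &cValueToPost, ( TickType_t ) 10, &xResult );

			if( xResult != pdPASS )
			{
				/* The value could not be posted within 10 ticks. */
			}
		}

		/* Co-routines must end with a call to crEND(). */
		crEND();
	}
#endif
/*-----------------------------------------------------------*/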
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
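/* Usage sketch (editor's illustration, not part of the kernel source): the
receiving side uses the crQUEUE_RECEIVE() macro from croutine.h, which
yields the co-routine when xQueueCRReceive() reports errQUEUE_BLOCKED.
xCoQueue is the same assumed handle as in the send sketch above. */
#if 0
	static void vExampleRxCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
	{
	static BaseType_t xResult;
	static char cReceived;

		crSTART( xHandle );

		for( ;; )
		{
			/* Wait indefinitely for a character to arrive on xCoQueue. */
			crQUEUE_RECEIVE( xHandle, xCoQueue, &cReceived, portMAX_DELAY, &xResult );

			if( xResult == pdPASS )
			{
				/* cReceived now holds the posted value. */
			}
		}

		crEND();
	}
#endif
/*-----------------------------------------------------------*/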
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
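/* Usage sketch (editor's illustration, not part of the kernel source): from
an ISR the crQUEUE_SEND_FROM_ISR() macro is used, threading the previous
return value through successive calls so that only one co-routine is woken
per interrupt - exactly the check the function above implements.  The
hardware macros are assumptions. */
#if 0
	void vExampleUartRxISR( void )
	{
	char cRxedChar;
	BaseType_t xCoRoutineWoken = pdFALSE;

		while( UART_RX_DATA_AVAILABLE() )	/* Hypothetical status macro. */
		{
			cRxedChar = UART_RX_REG;		/* Hypothetical RX register. */
			xCoRoutineWoken = crQUEUE_SEND_FROM_ISR( xCoQueue, &cRxedChar, xCoRoutineWoken );
		}
	}
#endif
/*-----------------------------------------------------------*/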
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available.  If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
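/* Usage sketch (editor's illustration, not part of the kernel source): the
mirror case - a TX ISR draining characters that co-routines have queued.
crQUEUE_RECEIVE_FROM_ISR() returns pdPASS while data remains.  The TX
register name is an assumption. */
#if 0
	void vExampleUartTxISR( void )
	{
	char cCharToTx;
	BaseType_t xCoRoutineWoken = pdFALSE;

		while( crQUEUE_RECEIVE_FROM_ISR( xCoQueue, &cCharToTx, &xCoRoutineWoken ) == pdPASS )
		{
			UART_TX_REG = cCharToTx;		/* Hypothetical TX register. */
		}
	}
#endif
/*-----------------------------------------------------------*/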
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
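/* Usage sketch (editor's illustration, not part of the kernel source):
registering a queue gives kernel-aware debuggers and FreeRTOS+Trace a name
to display.  Note the registry stores the name pointer, not a copy, so a
string literal (or other persistent string) must be used. */
#if 0
	QueueHandle_t xUartRxQueue = xQueueCreate( 16, sizeof( char ) );

	if( xUartRxQueue != NULL )
	{
		vQueueAddToRegistry( xUartRxQueue, "UartRx" );
	}
#endif
/*-----------------------------------------------------------*/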
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered is actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot is free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
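/* Usage sketch (editor's illustration, not part of the kernel source): a
registered queue should be unregistered before it is deleted, otherwise the
registry slot keeps a dangling handle until the slot is reused. */
#if 0
	vQueueUnregisterQueue( xUartRxQueue );
	vQueueDelete( xUartRxQueue );
#endif
/*-----------------------------------------------------------*/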
#if ( configUSE_TIMERS == 1 )

	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code, hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/
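/* For reference, a hedged sketch of the calling pattern the timer service
task in timers.c uses (this is kernel code, not an application example):
the scheduler is suspended first, so the task is only placed on the event
list here and actually blocks when the scheduler is resumed. */
#if 0
	vTaskSuspendAll();
	{
		/* ... calculate xTimeToWait from the next timer expiry ... */
		vQueueWaitForMessageRestricted( xTimerQueue, xTimeToWait );
	}
	if( xTaskResumeAll() == pdFALSE )
	{
		portYIELD_WITHIN_API();
	}
#endif
/*-----------------------------------------------------------*/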
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		/* A queue set is itself a queue of queue handles - each time a member
		receives data, the member's handle is posted to the set. */
		pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
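/* Usage sketch (editor's illustration, not part of the kernel source): the
set must be dimensioned to hold one event for every space in every member -
e.g. a 10-item queue plus a binary semaphore needs 10 + 1. */
#if 0
	QueueSetHandle_t xSet = xQueueCreateSet( 10 + 1 );
#endif
/*-----------------------------------------------------------*/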
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueAddToSet( QueueSetMember_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
		{
			/* Cannot add a queue/semaphore to more than one queue set. */
			xReturn = pdFAIL;
		}
		else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* Cannot add a queue/semaphore to a queue set if there are already
			items in the queue/semaphore. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
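/* Usage sketch (editor's illustration, not part of the kernel source):
members must be empty when added and may belong to only one set, as the
checks above enforce.  xMyQueue and xMySemaphore are assumed handles. */
#if 0
	if( xQueueAddToSet( ( QueueSetMember_t ) xMyQueue, xSet ) != pdPASS )
	{
		/* The queue was already in a set, or already held data. */
	}

	( void ) xQueueAddToSet( ( QueueSetMember_t ) xMySemaphore, xSet );
#endif
/*-----------------------------------------------------------*/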
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueRemoveFromSet( QueueSetMember_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
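/* Usage sketch (editor's illustration, not part of the kernel source): a
member can only be removed while it is empty - otherwise, as noted above,
the set would retain stale events for it. */
#if 0
	if( xQueueRemoveFromSet( ( QueueSetMember_t ) xMyQueue, xSet ) == pdPASS )
	{
		/* xMyQueue no longer posts events to xSet. */
	}
#endif
/*-----------------------------------------------------------*/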
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMember_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xBlockTimeTicks )
	{
	QueueSetMember_t xReturn = NULL;

		( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xBlockTimeTicks, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
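/* Usage sketch (editor's illustration, not part of the kernel source): the
canonical queue-set event loop.  A zero block time is used on the member
because xQueueSelectFromSet() only returns a handle that already has data
or a semaphore available.  Handle names are assumptions; xSemaphoreTake()
requires semphr.h to be included. */
#if 0
	for( ;; )
	{
	QueueSetMember_t xActivated;
	char cRxed;

		xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );

		if( xActivated == ( QueueSetMember_t ) xMyQueue )
		{
			( void ) xQueueReceive( ( QueueHandle_t ) xActivated, &cRxed, 0 );
		}
		else if( xActivated == ( QueueSetMember_t ) xMySemaphore )
		{
			( void ) xSemaphoreTake( xMySemaphore, 0 );
		}
	}
#endif
/*-----------------------------------------------------------*/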
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMember_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMember_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
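/* Usage sketch (editor's illustration, not part of the kernel source): the
ISR variant cannot block, so a NULL return simply means no member of the
set currently holds data. */
#if 0
	void vExampleSetISR( void )
	{
	QueueSetMember_t xActivated;
	char cRxed;

		xActivated = xQueueSelectFromSetFromISR( xSet );

		if( xActivated != NULL )
		{
			( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xActivated, &cRxed, NULL );
		}
	}
#endif
/*-----------------------------------------------------------*/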
#if ( configUSE_QUEUE_SETS == 1 )

	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );

			/* The data copied is the handle of the queue that contains data. */
			prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
			if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
				{
					/* The task waiting has a higher priority. */
					xReturn = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */