*----------------------------------------------------------*/\r
\r
/* Constants used with the cRxLock and cTxLock structure members. */\r
-#define queueUNLOCKED ( ( signed portBASE_TYPE ) -1 )\r
+#define queueUNLOCKED ( ( signed portBASE_TYPE ) -1 )\r
+#define queueLOCKED_UNMODIFIED ( ( signed portBASE_TYPE ) 0 )\r
+\r
#define queueERRONEOUS_UNBLOCK ( -1 )\r
\r
/* For internal use only. */\r
portBASE_TYPE xQueueGiveMutexRecursive( xQueueHandle xMutex );\r
signed portBASE_TYPE xQueueAltGenericSend( xQueueHandle pxQueue, const void * const pvItemToQueue, portTickType xTicksToWait, portBASE_TYPE xCopyPosition );\r
signed portBASE_TYPE xQueueAltGenericReceive( xQueueHandle pxQueue, const void * const pvBuffer, portTickType xTicksToWait, portBASE_TYPE xJustPeeking );\r
-portBASE_TYPE xQueueIsQueueEmptyFromISR( const xQueueHandle pxQueue );\r
-portBASE_TYPE xQueueIsQueueFullFromISR( const xQueueHandle pxQueue );\r
+signed portBASE_TYPE xQueueIsQueueEmptyFromISR( const xQueueHandle pxQueue );\r
+signed portBASE_TYPE xQueueIsQueueFullFromISR( const xQueueHandle pxQueue );\r
unsigned portBASE_TYPE uxQueueMessagesWaitingFromISR( const xQueueHandle pxQueue );\r
\r
\r
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
-#define prvLockQueue( pxQueue ) \\r
-{ \\r
- taskENTER_CRITICAL(); \\r
- ++( pxQueue->xRxLock ); \\r
- ++( pxQueue->xTxLock ); \\r
- taskEXIT_CRITICAL(); \\r
+#define prvLockQueue( pxQueue ) \\r
+{ \\r
+ taskENTER_CRITICAL(); \\r
+ { \\r
+ if( pxQueue->xRxLock == queueUNLOCKED ) \\r
+ { \\r
+ pxQueue->xRxLock = queueLOCKED_UNMODIFIED; \\r
+ } \\r
+ if( pxQueue->xTxLock == queueUNLOCKED ) \\r
+ { \\r
+ pxQueue->xTxLock = queueLOCKED_UNMODIFIED; \\r
+ } \\r
+ } \\r
+ taskEXIT_CRITICAL(); \\r
}\r
/*-----------------------------------------------------------*/\r
\r
mutual exclusion is required to test the pxMutexHolder variable. */\r
if( pxMutex->pxMutexHolder == xTaskGetCurrentTaskHandle() )\r
{\r
+ traceGIVE_MUTEX_RECURSIVE( pxMutex );\r
+\r
/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to\r
the task handle, therefore no underflow check is required. Also,\r
uxRecursiveCallCount is only modified by the mutex holder, and as\r
xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );\r
}\r
\r
- xReturn = pdPASS;\r
-\r
- traceGIVE_MUTEX_RECURSIVE( pxMutex );\r
+ xReturn = pdPASS; \r
}\r
else\r
{\r
/* Comments regarding mutual exclusion as per those within\r
xQueueGiveMutexRecursive(). */\r
\r
+ traceTAKE_MUTEX_RECURSIVE( pxMutex );\r
+\r
if( pxMutex->pxMutexHolder == xTaskGetCurrentTaskHandle() )\r
{\r
( pxMutex->uxRecursiveCallCount )++;\r
{\r
( pxMutex->uxRecursiveCallCount )++;\r
}\r
- }\r
-\r
- traceTAKE_MUTEX_RECURSIVE( pxMutex );\r
+ } \r
\r
return xReturn;\r
}\r
updated. */\r
taskENTER_CRITICAL();\r
{\r
- --( pxQueue->xTxLock );\r
-\r
/* See if data was added to the queue while it was locked. */\r
- if( pxQueue->xTxLock > queueUNLOCKED )\r
+ while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )\r
{\r
- pxQueue->xTxLock = queueUNLOCKED;\r
-\r
/* Data was posted while the queue was locked. Are any tasks\r
blocked waiting for data to become available? */\r
if( !listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) )\r
context switch is required. */\r
vTaskMissedYield();\r
}\r
- } \r
+\r
+ --( pxQueue->xTxLock );\r
+ }\r
+ else\r
+ {\r
+ break;\r
+ }\r
}\r
+\r
+ pxQueue->xTxLock = queueUNLOCKED;\r
}\r
taskEXIT_CRITICAL();\r
\r
/* Do the same for the Rx lock. */\r
taskENTER_CRITICAL();\r
{\r
- --( pxQueue->xRxLock );\r
-\r
- if( pxQueue->xRxLock > queueUNLOCKED )\r
+ while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )\r
{\r
- pxQueue->xRxLock = queueUNLOCKED;\r
-\r
if( !listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) )\r
{\r
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
{\r
vTaskMissedYield();\r
}\r
- } \r
+\r
+ --( pxQueue->xRxLock );\r
+ }\r
+ else\r
+ {\r
+ break;\r
+ }\r
}\r
+\r
+ pxQueue->xRxLock = queueUNLOCKED;\r
}\r
taskEXIT_CRITICAL();\r
}\r