git.sur5r.net Git - freertos/commitdiff
Default the definition of portASSERT_IF_IN_ISR() to nothing if it is not defined.
author: rtel <rtel@1d2547de-c912-0410-9cb9-b8ca96c0e9e2>
Mon, 16 Jun 2014 12:51:35 +0000 (12:51 +0000)
committer: rtel <rtel@1d2547de-c912-0410-9cb9-b8ca96c0e9e2>
Mon, 16 Jun 2014 12:51:35 +0000 (12:51 +0000)
Helper updates to allow a count of the number of mutexes held to be added.
Updates to the CCS Cortex-R4 implementation necessitated by a change in compiler semantics.
Update PIC32MX and MZ ports to assert if a non-ISR-safe function is called from an ISR.

git-svn-id: https://svn.code.sf.net/p/freertos/code/trunk@2263 1d2547de-c912-0410-9cb9-b8ca96c0e9e2

FreeRTOS/Source/include/FreeRTOS.h
FreeRTOS/Source/include/task.h
FreeRTOS/Source/portable/CCS/ARM_Cortex-R4/portASM.asm
FreeRTOS/Source/portable/IAR/ARM_CA9/port.c
FreeRTOS/Source/portable/MPLAB/PIC32MX/portmacro.h
FreeRTOS/Source/portable/MPLAB/PIC32MZ/portmacro.h
FreeRTOS/Source/queue.c

index 7b67af33a05f7f6af91f0d01ea0f8c6532857cf4..7bf350f0df11c174a244246ca7d4f949e59931f3 100644 (file)
@@ -717,6 +717,10 @@ is included as it is used by the port layer. */
        #define mtCOVERAGE_TEST_MARKER()\r
 #endif\r
 \r
+#ifndef portASSERT_IF_IN_ISR\r
+       #define portASSERT_IF_IN_ISR()\r
+#endif\r
+\r
 /* Definitions to allow backward compatibility with FreeRTOS versions prior to\r
 V8 if desired. */\r
 #ifndef configENABLE_BACKWARD_COMPATIBILITY\r
index ae279e1d6000905f9963571fb59b30618e8f4ed1..e8fc4559aec00788dfdea55a92ad4743038cd947 100644 (file)
@@ -1507,7 +1507,7 @@ void vTaskPriorityInherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTIO
  * Set the priority of a task back to its proper priority in the case that it\r
  * inherited a higher priority while it was holding a semaphore.\r
  */\r
-void vTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION;\r
+BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION;\r
 \r
 /*\r
  * Generic version of the task creation function which is in turn called by the\r
@@ -1552,6 +1552,15 @@ void vTaskStepTick( const TickType_t xTicksToJump ) PRIVILEGED_FUNCTION;
  */\r
 eSleepModeStatus eTaskConfirmSleepModeStatus( void ) PRIVILEGED_FUNCTION;\r
 \r
+/*\r
+ * For internal use only.  Increment the mutex held count when a mutex is\r
+ * taken and decrement the mutex held count when the mutex is given back\r
+ * respectively.  The mutex held count is used to know when it is safe to\r
+ * disinherit a priority.\r
+ */\r
+void vTaskIncrementMutexHeldCount( void );\r
+void vTaskDecrementMutexHeldCount( void );\r
+\r
 #ifdef __cplusplus\r
 }\r
 #endif\r
index 6e7aef6a799b572eb35dfaad117eae2255ee6f81..51b6802fc314c40e765838eea778ff0e34e68701 100644 (file)
@@ -1,7 +1,7 @@
 ;/*\r
 ;    FreeRTOS V8.0.1 - Copyright (C) 2014 Real Time Engineers Ltd.\r
 ;    All rights reserved\r
-;      \r
+;\r
 ;\r
 ;    ***************************************************************************\r
 ;     *                                                                       *\r
@@ -61,7 +61,7 @@
 \r
 ;/*-----------------------------------------------------------*/\r
 ;\r
-; Save Task Context \r
+; Save Task Context\r
 ;\r
 portSAVE_CONTEXT .macro\r
                DSB\r
@@ -101,7 +101,7 @@ portSAVE_CONTEXT .macro
 \r
                ; If the task is not using a floating point context then skip the\r
                ; saving of the FPU registers.\r
-               BEQ             PC+3\r
+               BEQ             $+16\r
                FSTMDBD LR!, {D0-D15}\r
                FMRX    R1,  FPSCR\r
                STMFD   LR!, {R1}\r
@@ -137,7 +137,7 @@ portRESTORE_CONTEXT .macro
 \r
                ; If the task is not using a floating point context then skip the\r
                ; VFP register loads.\r
-               BEQ             PC+3\r
+               BEQ             $+16\r
 \r
                ; Restore the floating point context.\r
                LDMFD   LR!, {R0}\r
index fc23dcd0ed1d78a1bbb3254bef75f6a5d6e3d171..674aab9a15cbce3094b7a7bda32e8e08ac14c3b8 100644 (file)
@@ -315,6 +315,16 @@ void vPortEnterCritical( void )
        directly.  Increment ulCriticalNesting to keep a count of how many times\r
        portENTER_CRITICAL() has been called. */\r
        ulCriticalNesting++;\r
+       \r
+       /* This is not the interrupt safe version of the enter critical function so\r
+       assert() if it is being called from an interrupt context.  Only API \r
+       functions that end in "FromISR" can be used in an interrupt.  Only assert if\r
+       the critical nesting count is 1 to protect against recursive calls if the\r
+       assert function also uses a critical section. */\r
+       if( ulCriticalNesting == 1 )\r
+       {\r
+               configASSERT( ulPortInterruptNesting == 0 );\r
+       }\r
 }\r
 /*-----------------------------------------------------------*/\r
 \r
index be11cbf77f3249db9d2ef07e3b04e9f1771c7c12..b748a811aa1bddce17d4f95f97a6a89bb76ab30b 100644 (file)
@@ -204,8 +204,8 @@ uint32_t ulCause;                                                   \
        _CP0_SET_CAUSE( ulCause );                                      \\r
 }\r
 \r
-#define portCURRENT_INTERRUPT_PRIORITY ( ( _CP0_GET_STATUS() & portALL_IPL_BITS ) >> portIPL_SHIFT )\r
-#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() configASSERT( portCURRENT_INTERRUPT_PRIORITY <= configMAX_SYSCALL_INTERRUPT_PRIORITY )\r
+extern volatile UBaseType_t uxInterruptNesting;\r
+#define portASSERT_IF_IN_ISR() configASSERT( uxInterruptNesting == 0 )\r
 \r
 #define portNOP()      __asm volatile ( "nop" )\r
 \r
index 6209de68454dac91603f6986448341ae5d2708a7..4b90ae4ad62b4c6016d7ad1608712d603599d48a 100644 (file)
@@ -206,8 +206,8 @@ uint32_t ulCause;                                                   \
        _CP0_SET_CAUSE( ulCause );                                      \\r
 }\r
 \r
-#define portCURRENT_INTERRUPT_PRIORITY ( ( _CP0_GET_STATUS() & portALL_IPL_BITS ) >> portIPL_SHIFT )\r
-#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() configASSERT( portCURRENT_INTERRUPT_PRIORITY <= configMAX_SYSCALL_INTERRUPT_PRIORITY )\r
+extern volatile UBaseType_t uxInterruptNesting;\r
+#define portASSERT_IF_IN_ISR() configASSERT( uxInterruptNesting == 0 )\r
 \r
 #define portNOP()      __asm volatile ( "nop" )\r
 \r
index cc7a9a82ddfc3e1378571ab82e864ca29ebb7845..48e05fd41bf8719da8c7d95a8dce5da120f6cf30 100644 (file)
@@ -216,7 +216,7 @@ static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
  * Copies an item into the queue, either at the front of the queue or the\r
  * back of the queue.\r
  */\r
-static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;\r
+static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;\r
 \r
 /*\r
  * Copies an item out of a queue.\r
@@ -421,7 +421,10 @@ QueueHandle_t xReturn = NULL;
 \r
                        traceCREATE_MUTEX( pxNewQueue );\r
 \r
-                       /* Start with the semaphore in the expected state. */\r
+                       /* Start with the semaphore in the expected state.  Preload the\r
+                        mutex held count as calling xQueueGenericSend() will decrement the\r
+                        count back to 0. */\r
+                       vTaskIncrementMutexHeldCount();\r
                        ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );\r
                }\r
                else\r
@@ -508,7 +511,8 @@ QueueHandle_t xReturn = NULL;
                }\r
                else\r
                {\r
-                       /* We cannot give the mutex because we are not the holder. */\r
+                       /* The mutex cannot be given because the calling task is not the \r
+                       holder. */\r
                        xReturn = pdFAIL;\r
 \r
                        traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );\r
@@ -543,8 +547,9 @@ QueueHandle_t xReturn = NULL;
                {\r
                        xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );\r
 \r
-                       /* pdPASS will only be returned if we successfully obtained the mutex,\r
-                       we may have blocked to reach here. */\r
+                       /* pdPASS will only be returned if the mutex was successfully \r
+                       obtained.  The calling task may have entered the Blocked state\r
+                       before reaching here. */\r
                        if( xReturn == pdPASS )\r
                        {\r
                                ( pxMutex->u.uxRecursiveCallCount )++;\r
@@ -592,7 +597,7 @@ QueueHandle_t xReturn = NULL;
 \r
 BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )\r
 {\r
-BaseType_t xEntryTimeSet = pdFALSE;\r
+BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;\r
 TimeOut_t xTimeOut;\r
 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
 \r
@@ -620,7 +625,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )\r
                        {\r
                                traceQUEUE_SEND( pxQueue );\r
-                               prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
+                               xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
 \r
                                #if ( configUSE_QUEUE_SETS == 1 )\r
                                {\r
@@ -657,6 +662,14 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                                                mtCOVERAGE_TEST_MARKER();\r
                                                        }\r
                                                }\r
+                                               else if( xYieldRequired != pdFALSE )\r
+                                               {\r
+                                                       /* This path is a special case that will only get\r
+                                                       executed if the task was holding multiple mutexes\r
+                                                       and the mutexes were given back in an order that is\r
+                                                       different to that in which they were taken. */\r
+                                                       queueYIELD_IF_USING_PREEMPTION();\r
+                                               }\r
                                                else\r
                                                {\r
                                                        mtCOVERAGE_TEST_MARKER();\r
@@ -690,9 +703,6 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                                #endif /* configUSE_QUEUE_SETS */\r
 \r
                                taskEXIT_CRITICAL();\r
-\r
-                               /* Return to the original privilege level before exiting the\r
-                               function. */\r
                                return pdPASS;\r
                        }\r
                        else\r
@@ -1059,7 +1069,20 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
                {\r
                        traceQUEUE_SEND_FROM_ISR( pxQueue );\r
 \r
-                       prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
+                       if( prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ) != pdFALSE )\r
+                       {\r
+                               /* This is a special case that can only be executed if a task\r
+                               holds multiple mutexes and then gives the mutexes back in an\r
+                               order that is different to that in which they were taken. */\r
+                               if( pxHigherPriorityTaskWoken != NULL )\r
+                               {\r
+                                       *pxHigherPriorityTaskWoken = pdTRUE;\r
+                               }\r
+                               else\r
+                               {\r
+                                       mtCOVERAGE_TEST_MARKER();\r
+                               }\r
+                       }\r
 \r
                        /* The event list is not altered if the queue is locked.  This will\r
                        be done when the queue is unlocked later. */\r
@@ -1591,8 +1614,10 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
 #endif /* configUSE_TRACE_FACILITY */\r
 /*-----------------------------------------------------------*/\r
 \r
-static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )\r
+static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )\r
 {\r
+BaseType_t xReturn = pdFALSE;\r
+\r
        if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )\r
        {\r
                #if ( configUSE_MUTEXES == 1 )\r
@@ -1600,8 +1625,9 @@ static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQue
                        if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
                        {\r
                                /* The mutex is no longer being held. */\r
-                               vTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );\r
-                               pxQueue->pxMutexHolder = NULL;\r
+                               vTaskDecrementMutexHeldCount();\r
+                               xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );\r
+                               pxQueue->pxMutexHolder = NULL;                          \r
                        }\r
                        else\r
                        {\r
@@ -1658,6 +1684,8 @@ static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQue
        }\r
 \r
        ++( pxQueue->uxMessagesWaiting );\r
+\r
+       return xReturn;\r
 }\r
 /*-----------------------------------------------------------*/\r
 \r
@@ -1678,7 +1706,8 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer
        }\r
        else\r
        {\r
-               mtCOVERAGE_TEST_MARKER();\r
+               /* A mutex was taken. */\r
+               vTaskIncrementMutexHeldCount();\r
        }\r
 }\r
 /*-----------------------------------------------------------*/\r
@@ -2367,8 +2396,9 @@ BaseType_t xReturn;
                if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )\r
                {\r
                        traceQUEUE_SEND( pxQueueSetContainer );\r
-                       /* The data copies is the handle of the queue that contains data. */\r
-                       prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );\r
+                       /* The data copied is the handle of the queue that contains data. */\r
+                       xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );\r
+\r
                        if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )\r
                        {\r
                                if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )\r