 * terminated by a HeapRegion_t structure that has a size of 0. The region \r
* with the lowest start address must appear first in the array.\r
*/\r
-void vPortDefineHeapRegions( HeapRegion_t *pxHeapRegions );\r
+void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions );\r
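\r
For illustration, a sketch of how an application might describe two memory\r
regions to heap_5 (the addresses and sizes below are hypothetical and\r
board specific):\r
\r
#include "FreeRTOS.h"\r
\r
/* Two hypothetical RAM banks - the region with the lowest start address\r
first, and the array terminated by a region with a size of 0. */\r
static const HeapRegion_t xHeapRegions[] =\r
{\r
    { ( uint8_t * ) 0x80000000UL, 0x10000 },\r
    { ( uint8_t * ) 0x90000000UL, 0x08000 },\r
    { NULL, 0 }\r
};\r
\r
int main( void )\r
{\r
    /* Must run before anything that calls pvPortMalloc(). */\r
    vPortDefineHeapRegions( xHeapRegions );\r
\r
    /* Create tasks and start the scheduler as normal. */\r
}\r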
\r
\r
/*\r
}\r
/*-----------------------------------------------------------*/\r
\r
-void vPortDefineHeapRegions( HeapRegion_t *pxHeapRegions )\r
+void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions )\r
{\r
BlockLink_t *pxFirstFreeBlockInRegion = NULL, *pxPreviousFreeBlock;\r
uint8_t *pucAlignedHeap;\r
size_t xTotalRegionSize, xTotalHeapSize = 0;\r
BaseType_t xDefinedRegions = 0;\r
uint32_t ulAddress;\r
-HeapRegion_t *pxHeapRegion;\r
+const HeapRegion_t *pxHeapRegion;\r
\r
/* Can only call once! */\r
configASSERT( pxEnd == NULL );\r
#define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL )\r
#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )\r
\r
+/* Masks off all bits but the VECTACTIVE bits in the ICSR register. */\r
+#define portVECTACTIVE_MASK ( 0xFFUL )\r
+\r
#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )\r
#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )\r
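\r
For context, these two priorities land in byte lanes of the System Handler\r
Priority Register 3 (SHPR3 at 0xe000ed20), which is why PendSV is shifted\r
into bits 16-23 and SysTick into bits 24-31. The scheduler start code sets\r
both handlers to the lowest (kernel) priority along these lines, assuming\r
the usual portNVIC_SYSPRI2_REG definition:\r
\r
#define portNVIC_SYSPRI2_REG ( * ( ( volatile uint32_t * ) 0xe000ed20 ) )\r
\r
/* Make PendSV and SysTick the lowest priority interrupts. */\r
portNVIC_SYSPRI2_REG |= portNVIC_PENDSV_PRI;\r
portNVIC_SYSPRI2_REG |= portNVIC_SYSTICK_PRI;\r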
\r
msr msp, r0\r
/* Globally enable interrupts. */\r
cpsie i\r
+ cpsie f\r
dsb\r
isb\r
/* Call SVC to start the first task. */\r
svc 0\r
nop\r
+ nop\r
}\r
/*-----------------------------------------------------------*/\r
\r
uxCriticalNesting++;\r
__dsb( portSY_FULL_READ_WRITE );\r
__isb( portSY_FULL_READ_WRITE );\r
+\r
+ /* This is not the interrupt safe version of the enter critical function so\r
+ assert() if it is being called from an interrupt context. Only API\r
+ functions that end in "FromISR" can be used in an interrupt. Only assert if\r
+ the critical nesting count is 1 to protect against recursive calls if the\r
+ assert function also uses a critical section. */\r
+ if( uxCriticalNesting == 1 )\r
+ {\r
+ configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 );\r
+ }\r
}\r
/*-----------------------------------------------------------*/\r
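\r
The practical rule the new assert enforces: inside an interrupt, only API\r
functions ending in "FromISR" may be used. A minimal sketch, assuming a\r
queue xQueue of uint32_t items created elsewhere and an interrupt wired to\r
vAnExampleISR():\r
\r
extern QueueHandle_t xQueue;\r
\r
void vAnExampleISR( void )\r
{\r
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;\r
    uint32_t ulValue = 1;\r
\r
    /* Correct: the interrupt safe variant. Calling xQueueSend() here\r
    would enter a critical section and trip the VECTACTIVE assert. */\r
    xQueueSendFromISR( xQueue, &ulValue, &xHigherPriorityTaskWoken );\r
\r
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );\r
}\r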
\r
#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )\r
#define portPRIGROUP_SHIFT ( 8UL )\r
\r
+/* Masks off all bits but the VECTACTIVE bits in the ICSR register. */\r
+#define portVECTACTIVE_MASK ( 0xFFUL )\r
+\r
/* Constants required to manipulate the VFP. */\r
#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating point context control register. */\r
#define portASPEN_AND_LSPEN_BITS ( 0x3UL << 30UL )\r
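\r
For context, these constants are used when the scheduler starts to enable\r
lazy saving of the floating point context, along the lines of:\r
\r
/* Set the ASPEN and LSPEN bits in the FPCCR so the FPU registers are\r
only saved on exception entry if they were actually used. */\r
*( portFPCCR ) |= portASPEN_AND_LSPEN_BITS;\r
\r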
msr msp, r0\r
/* Globally enable interrupts. */\r
cpsie i\r
+ cpsie f\r
dsb\r
isb\r
/* Call SVC to start the first task. */\r
svc 0\r
nop\r
+ nop\r
}\r
/*-----------------------------------------------------------*/\r
\r
uxCriticalNesting++;\r
__dsb( portSY_FULL_READ_WRITE );\r
__isb( portSY_FULL_READ_WRITE );\r
+\r
+ /* This is not the interrupt safe version of the enter critical function so\r
+ assert() if it is being called from an interrupt context. Only API\r
+ functions that end in "FromISR" can be used in an interrupt. Only assert if\r
+ the critical nesting count is 1 to protect against recursive calls if the\r
+ assert function also uses a critical section. */\r
+ if( uxCriticalNesting == 1 )\r
+ {\r
+ configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 );\r
+ }\r
}\r
/*-----------------------------------------------------------*/\r
\r
}\r
else\r
{\r
- /* The mutex cannot be given because the calling task is not the \r
+ /* The mutex cannot be given because the calling task is not the\r
holder. */\r
xReturn = pdFAIL;\r
\r
{\r
xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );\r
\r
- /* pdPASS will only be returned if the mutex was successfully \r
+ /* pdPASS will only be returned if the mutex was successfully\r
obtained. The calling task may have entered the Blocked state\r
before reaching here. */\r
if( xReturn == pdPASS )\r
mtCOVERAGE_TEST_MARKER();\r
}\r
}\r
+ else if( xYieldRequired != pdFALSE )\r
+ {\r
+ /* This path is a special case that will only get\r
+ executed if the task was holding multiple mutexes and\r
+ the mutexes were given back in an order that is\r
+ different to that in which they were taken. */\r
+ queueYIELD_IF_USING_PREEMPTION();\r
+ }\r
else\r
{\r
mtCOVERAGE_TEST_MARKER();\r
/* The mutex is no longer being held. */\r
vTaskDecrementMutexHeldCount();\r
xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );\r
- pxQueue->pxMutexHolder = NULL; \r
+ pxQueue->pxMutexHolder = NULL;\r
}\r
else\r
{\r
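\r
The multiple-mutex special case above is easiest to picture with two\r
mutexes. A hedged sketch, assuming xMutexA and xMutexB were created with\r
xSemaphoreCreateMutex() and that a higher priority task blocking on one of\r
them caused this task to inherit its priority:\r
\r
xSemaphoreTake( xMutexA, portMAX_DELAY );\r
xSemaphoreTake( xMutexB, portMAX_DELAY );\r
\r
/* Giving the mutexes back in a different order to that in which they\r
were taken (here A before B, not the usual reverse/nested order) is the\r
case the queueYIELD_IF_USING_PREEMPTION() path added above covers: it\r
yields as soon as the inherited priority is disinherited. */\r
xSemaphoreGive( xMutexA );\r
xSemaphoreGive( xMutexB );\r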