--- /dev/null
+/*******************************************************************************\r
+ * Tracealyzer v2.4.1 Recorder Library\r
+ * Percepio AB, www.percepio.com\r
+ *\r
+ * trcKernelPort.h\r
+ *\r
+ * Kernel-specific functionality for FreeRTOS, used by the recorder library.\r
+ * \r
+ * Terms of Use\r
+ * This software is copyright Percepio AB. The recorder library is free for\r
+ * use together with Percepio products. You may distribute the recorder library\r
+ * in its original form, including modifications in trcHardwarePort.c/.h\r
+ * given that these modification are clearly marked as your own modifications\r
+ * and documented in the initial comment section of these source files. \r
+ * This software is the intellectual property of Percepio AB and may not be \r
+ * sold or in other ways commercially redistributed without explicit written \r
+ * permission by Percepio AB.\r
+ *\r
+ * Disclaimer \r
+ * The trace tool and recorder library is being delivered to you AS IS and \r
+ * Percepio AB makes no warranty as to its use or performance. Percepio AB does \r
+ * not and cannot warrant the performance or results you may obtain by using the \r
+ * software or documentation. Percepio AB make no warranties, express or \r
+ * implied, as to noninfringement of third party rights, merchantability, or \r
+ * fitness for any particular purpose. In no event will Percepio AB, its \r
+ * technology partners, or distributors be liable to you for any consequential, \r
+ * incidental or special damages, including any lost profits or lost savings, \r
+ * even if a representative of Percepio AB has been advised of the possibility \r
+ * of such damages, or for any claim by any third party. Some jurisdictions do \r
+ * not allow the exclusion or limitation of incidental, consequential or special \r
+ * damages, or the exclusion of implied warranties or limitations on how long an \r
+ * implied warranty may last, so the above limitations may not apply to you.\r
+ *\r
+ * Copyright Percepio AB, 2013.\r
+ * www.percepio.com\r
+ ******************************************************************************/\r
+\r
+\r
#ifndef TRCKERNELPORT_H_
#define TRCKERNELPORT_H_

#include "FreeRTOS.h" // Defines configUSE_TRACE_FACILITY

/* The recorder is compiled in only when the FreeRTOS trace facility is
 * enabled in FreeRTOSConfig.h; otherwise all recorder macros become no-ops. */
#define USE_TRACEALYZER_RECORDER configUSE_TRACE_FACILITY

#if (USE_TRACEALYZER_RECORDER == 1)

/* Defines that must be set for the recorder to work properly.
 * These forward the kernel's clock/tick configuration to the recorder. */
#define TRACE_KERNEL_VERSION 0x1AA1
#define TRACE_CPU_CLOCK_HZ configCPU_CLOCK_HZ /* Defined in "FreeRTOS.h" */
#define TRACE_PERIPHERAL_CLOCK_HZ configPERIPHERAL_CLOCK_HZ /* Defined in "FreeRTOS.h" */
#define TRACE_TICK_RATE_HZ configTICK_RATE_HZ /* Defined in "FreeRTOS.h" */
#define TRACE_CPU_CLOCKS_PER_TICK configCPU_CLOCKS_PER_TICK /* Defined in "FreeRTOS.h" */
+\r
/************************************************************************/
/* KERNEL SPECIFIC OBJECT CONFIGURATION                                 */
/************************************************************************/

/* Number of traced object classes and their class IDs. The IDs index the
 * Object Property Table sections laid out below, and are also encoded in
 * the three LSBs of class-parameterized event codes. */
#define TRACE_NCLASSES 5
#define TRACE_CLASS_QUEUE ((traceObjectClass)0)
#define TRACE_CLASS_SEMAPHORE ((traceObjectClass)1)
#define TRACE_CLASS_MUTEX ((traceObjectClass)2)
#define TRACE_CLASS_TASK ((traceObjectClass)3)
#define TRACE_CLASS_ISR ((traceObjectClass)4)

/* Total object capacity; NQueue/NSemaphore/etc. come from trcConfig.h. */
#define TRACE_KERNEL_OBJECT_COUNT (NQueue + NSemaphore + NMutex + NTask + NISR)

/* The size of the Object Property Table entries, in bytes, per object */

/* Queue properties (except name): current number of message in queue */
#define PropertyTableSizeQueue (NameLenQueue + 1)

/* Semaphore properties (except name): state (signaled = 1, cleared = 0) */
#define PropertyTableSizeSemaphore (NameLenSemaphore + 1)

/* Mutex properties (except name): owner (task handle, 0 = free) */
#define PropertyTableSizeMutex (NameLenMutex + 1)

/* Task properties (except name): Byte 0: Current priority
                                  Byte 1: state (if already active)
                                  Byte 2: legacy, not used
                                  Byte 3: legacy, not used */
#define PropertyTableSizeTask (NameLenTask + 4)

/* ISR properties:                Byte 0: priority
                                  Byte 1: state (if already active) */
#define PropertyTableSizeISR (NameLenISR + 2)

/* The layout of the byte array representing the Object Property Table.
 * NOTE: each expansion is fully parenthesized (CERT PRE02-C) so the macros
 * evaluate correctly when embedded in larger expressions; the unparenthesized
 * originals broke under multiplication or comparison at the use site. */
#define StartIndexQueue 0
#define StartIndexSemaphore (StartIndexQueue + NQueue * PropertyTableSizeQueue)
#define StartIndexMutex (StartIndexSemaphore + NSemaphore * PropertyTableSizeSemaphore)
#define StartIndexTask (StartIndexMutex + NMutex * PropertyTableSizeMutex)
#define StartIndexISR (StartIndexTask + NTask * PropertyTableSizeTask)

/* Number of bytes used by the object table */
#define TRACE_OBJECT_TABLE_SIZE (StartIndexISR + NISR * PropertyTableSizeISR)
+\r
+\r
+/* Includes */\r
+#include "trcTypes.h"\r
+#include "trcConfig.h"\r
+#include "trcHooks.h"\r
+#include "trcHardwarePort.h"\r
+#include "trcBase.h"\r
+#include "trcKernel.h"\r
+#include "trcUser.h"\r
+\r
/* Initialization of the object property table (clears all entries). */
void vTraceInitObjectPropertyTable(void);

/* Initialization of the handle mechanism, see e.g., xTraceGetObjectHandle */
void vTraceInitObjectHandleStack(void);

/* Returns the "Not enough handles" error message for the specified object
 * class, for use when the handle pool of that class is exhausted. */
const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);
+\r
+/*******************************************************************************\r
+ * The event codes - should match the offline config file.\r
+ * \r
+ * Some sections below are encoded to allow for constructions like:\r
+ *\r
+ * vTraceStoreKernelCall(EVENTGROUP_CREATE + objectclass, ...\r
+ *\r
+ * The object class ID is given by the three LSB bits, in such cases. Since each \r
+ * object class has a separate object property table, the class ID is needed to \r
+ * know what section in the object table to use for getting an object name from\r
+ * an object handle. \r
+ ******************************************************************************/\r
+\r
+#define NULL_EVENT (0x00) /* Ignored in the analysis*/\r
+\r
+/*******************************************************************************\r
+ * EVENTGROUP_DIV\r
+ *\r
+ * Miscellaneous events.\r
+ ******************************************************************************/\r
+#define EVENTGROUP_DIV (NULL_EVENT + 1) /*0x01*/\r
+#define DIV_XPS (EVENTGROUP_DIV + 0) /*0x01*/\r
+#define DIV_TASK_READY (EVENTGROUP_DIV + 1) /*0x02*/\r
+#define DIV_NEW_TIME (EVENTGROUP_DIV + 2) /*0x03*/\r
+\r
+/*******************************************************************************\r
+ * EVENTGROUP_TS\r
+ *\r
+ * Events for storing task-switches and interrupts. The RESUME events are \r
+ * generated if the task/interrupt is already marked active.\r
+ ******************************************************************************/\r
+#define EVENTGROUP_TS (EVENTGROUP_DIV + 3) /*0x04*/\r
+#define TS_ISR_BEGIN (EVENTGROUP_TS + 0) /*0x04*/\r
+#define TS_ISR_RESUME (EVENTGROUP_TS + 1) /*0x05*/\r
+#define TS_TASK_BEGIN (EVENTGROUP_TS + 2) /*0x06*/\r
+#define TS_TASK_RESUME (EVENTGROUP_TS + 3) /*0x07*/\r
+\r
+/*******************************************************************************\r
+ * EVENTGROUP_OBJCLOSE_NAME\r
+ * \r
+ * About Close Events\r
+ * When an object is evicted from the object property table (object close), two \r
+ * internal events are stored (EVENTGROUP_OBJCLOSE_NAME and \r
+ * EVENTGROUP_OBJCLOSE_PROP), containing the handle-name mapping and object \r
+ * properties valid up to this point.\r
+ ******************************************************************************/\r
+#define EVENTGROUP_OBJCLOSE_NAME (EVENTGROUP_TS + 4) /*0x08*/\r
+\r
+/*******************************************************************************\r
+ * EVENTGROUP_OBJCLOSE_PROP\r
+ * \r
+ * The internal event carrying properties of deleted objects\r
+ * The handle and object class of the closed object is not stored in this event, \r
+ * but is assumed to be the same as in the preceding CLOSE event. Thus, these \r
+ * two events must be generated from within a critical section. \r
+ * When queues are closed, arg1 is the "state" property (i.e., number of \r
+ * buffered messages/signals).\r
+ * When actors are closed, arg1 is priority, arg2 is handle of the "instance \r
+ * finish" event, and arg3 is event code of the "instance finish" event. \r
+ * In this case, the lower three bits is the object class of the instance finish \r
+ * handle. The lower three bits are not used (always zero) when queues are \r
+ * closed since the queue type is given in the previous OBJCLOSE_NAME event.\r
+ ******************************************************************************/\r
+#define EVENTGROUP_OBJCLOSE_PROP (EVENTGROUP_OBJCLOSE_NAME + 8) /*0x10*/\r
+\r
+/*******************************************************************************\r
+ * EVENTGROUP_CREATE\r
+ * \r
+ * The events in this group are used to log Kernel object creations.\r
+ * The lower three bits in the event code gives the object class, i.e., type of\r
+ * create operation (task, queue, semaphore, etc).\r
+ ******************************************************************************/\r
+#define EVENTGROUP_CREATE_SUCCESS (EVENTGROUP_OBJCLOSE_PROP + 8) /*0x18*/\r
+\r
+/*******************************************************************************\r
+ * EVENTGROUP_SEND\r
+ * \r
+ * The events in this group are used to log Send/Give events on queues, \r
+ * semaphores and mutexes The lower three bits in the event code gives the \r
+ * object class, i.e., what type of object that is operated on (queue, semaphore \r
+ * or mutex).\r
+ ******************************************************************************/\r
+#define EVENTGROUP_SEND_SUCCESS (EVENTGROUP_CREATE_SUCCESS + 8) /*0x20*/\r
+\r
+/*******************************************************************************\r
+ * EVENTGROUP_RECEIVE\r
+ * \r
+ * The events in this group are used to log Receive/Take events on queues, \r
+ * semaphores and mutexes. The lower three bits in the event code gives the \r
+ * object class, i.e., what type of object that is operated on (queue, semaphore\r
+ * or mutex).\r
+ ******************************************************************************/\r
+#define EVENTGROUP_RECEIVE_SUCCESS (EVENTGROUP_SEND_SUCCESS + 8) /*0x28*/\r
+\r
+/* Send/Give operations, from ISR */\r
+#define EVENTGROUP_SEND_FROM_ISR_SUCCESS (EVENTGROUP_RECEIVE_SUCCESS + 8) /*0x30*/\r
+\r
+/* Receive/Take operations, from ISR */\r
+#define EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS (EVENTGROUP_SEND_FROM_ISR_SUCCESS + 8) /*0x38*/\r
+\r
+/* "Failed" event type versions of above (timeout, failed allocation, etc) */\r
+#define EVENTGROUP_KSE_FAILED (EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + 8) /*0x40*/\r
+\r
+/* Failed create calls - memory allocation failed */\r
+#define EVENTGROUP_CREATE_FAILED (EVENTGROUP_KSE_FAILED) /*0x40*/\r
+\r
+/* Failed send/give - timeout! */\r
+#define EVENTGROUP_SEND_FAILED (EVENTGROUP_CREATE_FAILED + 8) /*0x48*/\r
+\r
+/* Failed receive/take - timeout! */\r
+#define EVENTGROUP_RECEIVE_FAILED (EVENTGROUP_SEND_FAILED + 8) /*0x50*/\r
+\r
+/* Failed non-blocking send/give - queue full */\r
+#define EVENTGROUP_SEND_FROM_ISR_FAILED (EVENTGROUP_RECEIVE_FAILED + 8) /*0x58*/\r
+\r
+/* Failed non-blocking receive/take - queue empty */\r
+#define EVENTGROUP_RECEIVE_FROM_ISR_FAILED \\r
+ (EVENTGROUP_SEND_FROM_ISR_FAILED + 8) /*0x60*/\r
+\r
+/* Events when blocking on receive/take */\r
+#define EVENTGROUP_RECEIVE_BLOCK \\r
+ (EVENTGROUP_RECEIVE_FROM_ISR_FAILED + 8) /*0x68*/\r
+\r
+/* Events when blocking on send/give */\r
+#define EVENTGROUP_SEND_BLOCK (EVENTGROUP_RECEIVE_BLOCK + 8) /*0x70*/\r
+\r
+/* Events on queue peek (receive) */\r
+#define EVENTGROUP_PEEK_SUCCESS (EVENTGROUP_SEND_BLOCK + 8) /*0x78*/\r
+\r
+/* Events on object delete (vTaskDelete or vQueueDelete) */\r
+#define EVENTGROUP_DELETE_SUCCESS (EVENTGROUP_PEEK_SUCCESS + 8) /*0x80*/\r
+\r
+/* Other events - object class is implied: TASK */\r
+#define EVENTGROUP_OTHERS (EVENTGROUP_DELETE_SUCCESS + 8) /*0x88*/\r
+#define TASK_DELAY_UNTIL (EVENTGROUP_OTHERS + 0) /*0x88*/\r
+#define TASK_DELAY (EVENTGROUP_OTHERS + 1) /*0x89*/\r
+#define TASK_SUSPEND (EVENTGROUP_OTHERS + 2) /*0x8A*/\r
+#define TASK_RESUME (EVENTGROUP_OTHERS + 3) /*0x8B*/\r
+#define TASK_RESUME_FROM_ISR (EVENTGROUP_OTHERS + 4) /*0x8C*/\r
+#define TASK_PRIORITY_SET (EVENTGROUP_OTHERS + 5) /*0x8D*/\r
+#define TASK_PRIORITY_INHERIT (EVENTGROUP_OTHERS + 6) /*0x8E*/\r
+#define TASK_PRIORITY_DISINHERIT (EVENTGROUP_OTHERS + 7) /*0x8F*/\r
+\r
+/* Not yet used */\r
+#define EVENTGROUP_FTRACE_PLACEHOLDER (EVENTGROUP_OTHERS + 8) /*0x90*/\r
+\r
+/* User events */\r
+#define EVENTGROUP_USEREVENT (EVENTGROUP_FTRACE_PLACEHOLDER + 8) /*0x98*/\r
+#define USER_EVENT (EVENTGROUP_USEREVENT + 0)\r
+\r
+/* Allow for 0-15 arguments (the number of args is added to event code) */\r
+#define USER_EVENT_LAST (EVENTGROUP_USEREVENT + 15) /*0xA7*/\r
+\r
+/*******************************************************************************\r
+ * XTS Event - eXtended TimeStamp events\r
+ * The timestamps used in the recorder are "differential timestamps" (DTS), i.e.\r
+ * the time since the last stored event. The DTS fields are either 1 or 2 bytes \r
+ * in the other events, depending on the bytes available in the event struct. \r
+ * If the time since the last event (the DTS) is larger than allowed for by \r
+ * the DTS field of the current event, an XTS event is inserted immediately \r
+ * before the original event. The XTS event contains up to 3 additional bytes \r
+ * of the DTS value - the higher bytes of the true DTS value. The lower 1-2 \r
+ * bytes are stored in the normal DTS field. \r
+ * There are two types of XTS events, XTS8 and XTS16. An XTS8 event is stored \r
+ * when there is only room for 1 byte (8 bit) DTS data in the original event, \r
+ * which means a limit of 0xFF (255). The XTS16 is used when the original event \r
+ * has a 16 bit DTS field and thereby can handle values up to 0xFFFF (65535).\r
+ * \r
+ * Using a very high frequency time base can result in many XTS events. \r
+ * Preferably, the time between two OS ticks should fit in 16 bits, i.e.,\r
 * at most 65535. If your time base has a higher frequency, consider using a
 * prescaled timestamp clock in the hardware port (see trcHardwarePort.h) to
 * reduce the number of XTS events.
+ ******************************************************************************/\r
+\r
+#define EVENTGROUP_SYS (EVENTGROUP_USEREVENT + 16) /*0xA8*/\r
+#define XTS8 (EVENTGROUP_SYS + 0) /*0xA8*/\r
+#define XTS16 (EVENTGROUP_SYS + 1) /*0xA9*/\r
+\r
+#define EVENT_BEING_WRITTEN (EVENTGROUP_SYS + 2) /*0xAA*/\r
+\r
+#define RESERVED_DUMMY_CODE (EVENTGROUP_SYS + 3) /*0xAB*/\r
+\r
+\r
+\r
+/************************************************************************/\r
+/* KERNEL SPECIFIC DATA AND FUNCTIONS NEEDED TO PROVIDE THE */\r
+/* FUNCTIONALITY REQUESTED BY THE TRACE RECORDER */\r
+/************************************************************************/\r
+\r
/******************************************************************************
 * TraceObjectClassTable
 * Translates a FreeRTOS QueueType into trace objects classes (TRACE_CLASS_).
 * This was added since we want to map both types of Mutex and both types of
 * Semaphores on common classes for all Mutexes and all Semaphores respectively.
 *
 * FreeRTOS Queue types
 * #define queueQUEUE_TYPE_BASE (0U) => TRACE_CLASS_QUEUE
 * #define queueQUEUE_TYPE_MUTEX (1U) => TRACE_CLASS_MUTEX
 * #define queueQUEUE_TYPE_COUNTING_SEMAPHORE (2U) => TRACE_CLASS_SEMAPHORE
 * #define queueQUEUE_TYPE_BINARY_SEMAPHORE (3U) => TRACE_CLASS_SEMAPHORE
 * #define queueQUEUE_TYPE_RECURSIVE_MUTEX (4U) => TRACE_CLASS_MUTEX
 ******************************************************************************/

extern traceObjectClass TraceObjectClassTable[5];

/* These functions are implemented in the .c file since certain header files must not be included in this one */

/* Object/task number lookups (used by TRACE_GET_OBJECT_NUMBER / TRACE_GET_TASK_NUMBER). */
objectHandleType prvTraceGetObjectNumber(void* handle);
/* FreeRTOS queue type of the object; indexes TraceObjectClassTable (see above). */
unsigned char prvTraceGetObjectType(void* handle);
objectHandleType prvTraceGetTaskNumber(void* handle);
/* Scheduler state queries, used to gate tracing during startup/suspension. */
unsigned char prvTraceIsSchedulerActive(void);
unsigned char prvTraceIsSchedulerSuspended(void);
unsigned char prvTraceIsSchedulerStarted(void);
/* Critical-section primitives wrapped by TRACE_ENTER/EXIT_CRITICAL_SECTION. */
void prvTraceEnterCritical(void);
void prvTraceExitCritical(void);
/* Handle of the currently executing task. */
void* prvTraceGetCurrentTaskHandle(void);
+\r
+\r
+/************************************************************************/\r
+/* KERNEL SPECIFIC MACROS USED BY THE TRACE RECORDER */\r
+/************************************************************************/\r
+\r
#define TRACE_MALLOC(size) pvPortMalloc(size)

/* NOTE: these two expansions already end in ';', so the extra ';' written at
 * call sites merely adds a harmless empty statement. */
#define TRACE_ENTER_CRITICAL_SECTION() prvTraceEnterCritical();
#define TRACE_EXIT_CRITICAL_SECTION() prvTraceExitCritical();

#define TRACE_IS_SCHEDULER_ACTIVE() prvTraceIsSchedulerActive()
#define TRACE_IS_SCHEDULER_STARTED() prvTraceIsSchedulerStarted()
#define TRACE_IS_SCHEDULER_SUSPENDED() prvTraceIsSchedulerSuspended()
#define TRACE_GET_CURRENT_TASK() prvTraceGetCurrentTaskHandle()

/* Accessors into the FreeRTOS TCB; pxTCB must be a valid task handle. */
#define TRACE_GET_TASK_PRIORITY(pxTCB) ((uint8_t)pxTCB->uxPriority)
#define TRACE_GET_TASK_NAME(pxTCB) ((char*)pxTCB->pcTaskName)
#define TRACE_GET_TASK_NUMBER(pxTCB) (prvTraceGetTaskNumber(pxTCB))
#define TRACE_SET_TASK_NUMBER(pxTCB) pxTCB->uxTaskNumber = xTraceGetObjectHandle(TRACE_CLASS_TASK);

/* The CLASS parameter is unused in this port (kept for cross-kernel API symmetry). */
#define TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass) TraceObjectClassTable[kernelClass]
#define TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject) TRACE_GET_CLASS_TRACE_CLASS(CLASS, prvTraceGetObjectType(pxObject))

#define TRACE_GET_OBJECT_NUMBER(CLASS, pxObject) (prvTraceGetObjectNumber(pxObject))
#define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) pxObject->ucQueueNumber = xTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject));

/* Builds an event code by token-pasting EVENTGROUP_<SERVICE>_<RESULT> and
 * adding the object's trace class (encoded in the three LSBs, see above). */
#define TRACE_GET_CLASS_EVENT_CODE(SERVICE, RESULT, CLASS, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass))
#define TRACE_GET_OBJECT_EVENT_CODE(SERVICE, RESULT, CLASS, pxObject) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject))
#define TRACE_GET_TASK_EVENT_CODE(SERVICE, RESULT, CLASS, pxTCB) (EVENTGROUP_##SERVICE##_##RESULT + TRACE_CLASS_TASK)
+\r
+\r
+\r
+/************************************************************************/\r
+/* KERNEL SPECIFIC WRAPPERS THAT SHOULD BE CALLED BY THE KERNEL */\r
+/************************************************************************/\r
+\r
/* Called for each task that becomes ready */
#undef traceMOVED_TASK_TO_READY_STATE
#define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \
	trcKERNEL_HOOKS_MOVED_TASK_TO_READY_STATE(pxTCB);

/* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. */
/* NOTE(review): references the kernel-internal variables uxSchedulerSuspended
   and uxPendedTicks at the expansion site in xTaskIncrementTick - only valid
   for the FreeRTOS version this port targets; verify on kernel upgrades. */
#undef traceTASK_INCREMENT_TICK
#define traceTASK_INCREMENT_TICK( xTickCount ) \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxPendedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); }

/* Called on each task-switch */
#undef traceTASK_SWITCHED_IN
#define traceTASK_SWITCHED_IN() \
	trcKERNEL_HOOKS_TASK_SWITCH(TRACE_GET_CURRENT_TASK());

/* Called on vTaskSuspend */
#undef traceTASK_SUSPEND
#define traceTASK_SUSPEND( pxTaskToSuspend ) \
	trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend);

/* Called on vTaskDelay - note the use of FreeRTOS variable xTicksToDelay */
#undef traceTASK_DELAY
#define traceTASK_DELAY() \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY, pxCurrentTCB, xTicksToDelay); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(UNUSED,pxCurrentTCB); \
	TRACE_EXIT_CRITICAL_SECTION();

/* Called on vTaskDelayUntil - note the use of FreeRTOS variable xTimeToWake */
#undef traceTASK_DELAY_UNTIL
#define traceTASK_DELAY_UNTIL() \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY_UNTIL, pxCurrentTCB, xTimeToWake); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(UNUSED,pxCurrentTCB); \
	TRACE_EXIT_CRITICAL_SECTION();

#if (INCLUDE_OBJECT_DELETE == 1)
/* Called on vTaskDelete */
#undef traceTASK_DELETE
#define traceTASK_DELETE( pxTaskToDelete ) \
	trcKERNEL_HOOKS_TASK_DELETE(DELETE, pxTaskToDelete);
#endif

#if (INCLUDE_OBJECT_DELETE == 1)
/* Called on vQueueDelete */
#undef traceQUEUE_DELETE
#define traceQUEUE_DELETE( pxQueue ) \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_OBJECT_DELETE(DELETE, UNUSED, pxQueue); \
	TRACE_EXIT_CRITICAL_SECTION();
#endif
+\r
/* Called on vTaskCreate */
#undef traceTASK_CREATE
#define traceTASK_CREATE(pxNewTCB) \
	if (pxNewTCB != NULL) \
	{ \
		trcKERNEL_HOOKS_TASK_CREATE(CREATE, pxNewTCB); \
	}

/* Called in vTaskCreate, if it fails (typically if the stack can not be allocated) */
#undef traceTASK_CREATE_FAILED
#define traceTASK_CREATE_FAILED() \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_TASK_CREATE_FAILED(CREATE); \
	TRACE_EXIT_CRITICAL_SECTION();

/* Called in xQueueCreate, and thereby for all other object based on queues, such as semaphores. */
#undef traceQUEUE_CREATE
#define traceQUEUE_CREATE( pxNewQueue )\
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_OBJECT_CREATE(CREATE, UNUSED, pxNewQueue); \
	TRACE_EXIT_CRITICAL_SECTION();

/* Called in xQueueCreate, if the queue creation fails */
#undef traceQUEUE_CREATE_FAILED
#define traceQUEUE_CREATE_FAILED( queueType ) \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE, UNUSED, queueType); \
	TRACE_EXIT_CRITICAL_SECTION();

/* Called in xQueueCreateMutex, and thereby also from xSemaphoreCreateMutex and xSemaphoreCreateRecursiveMutex */
#undef traceCREATE_MUTEX
#define traceCREATE_MUTEX( pxNewQueue ) \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_OBJECT_CREATE(CREATE, UNUSED, pxNewQueue); \
	TRACE_EXIT_CRITICAL_SECTION();

/* Called in xQueueCreateMutex when the operation fails (when memory allocation fails) */
#undef traceCREATE_MUTEX_FAILED
#define traceCREATE_MUTEX_FAILED() \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE, UNUSED, queueQUEUE_TYPE_MUTEX); \
	TRACE_EXIT_CRITICAL_SECTION();

/* Called when the Mutex can not be given, since not holder */
/* Logged as a SEND/FAILED event for the mutex object. */
#undef traceGIVE_MUTEX_RECURSIVE_FAILED
#define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ) \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxMutex); \
	TRACE_EXIT_CRITICAL_SECTION();
+\r
/* Called when a message is sent to a queue */
#undef traceQUEUE_SEND
#define traceQUEUE_SEND( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, SUCCESS, UNUSED, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? (uint8_t)0 : (uint8_t)(pxQueue->uxMessagesWaiting + 1)); /*For mutex, store the new owner rather than queue length */

/* Called when a message failed to be sent to a queue (timeout) */
#undef traceQUEUE_SEND_FAILED
#define traceQUEUE_SEND_FAILED( pxQueue ) \
	TRACE_ENTER_CRITICAL_SECTION();\
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxQueue); \
	TRACE_EXIT_CRITICAL_SECTION();

/* Called when the task is blocked due to a send operation on a full queue */
#undef traceBLOCKING_ON_QUEUE_SEND
#define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \
	TRACE_ENTER_CRITICAL_SECTION();\
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, BLOCK, UNUSED, pxQueue); \
	TRACE_EXIT_CRITICAL_SECTION();

/* Called when a message is received from a queue */
/* For mutexes the stored state is the new owner (current task number);
   for plain queues/semaphores it is the decremented message count. */
#undef traceQUEUE_RECEIVE
#define traceQUEUE_RECEIVE( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, SUCCESS, UNUSED, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? TRACE_GET_TASK_NUMBER(TRACE_GET_CURRENT_TASK()) : (uint8_t)(pxQueue->uxMessagesWaiting - 1)); /*For mutex, store the new owner rather than queue length */

/* Called when a receive operation on a queue fails (timeout) */
#undef traceQUEUE_RECEIVE_FAILED
#define traceQUEUE_RECEIVE_FAILED( pxQueue ) \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, FAILED, UNUSED, pxQueue); \
	TRACE_EXIT_CRITICAL_SECTION();

/* Called when the task is blocked due to a receive operation on an empty queue */
#undef traceBLOCKING_ON_QUEUE_RECEIVE
#define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, BLOCK, UNUSED, pxQueue); \
	if (TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) != TRACE_CLASS_MUTEX) \
	{ \
		trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(UNUSED, pxQueue); \
	} \
	TRACE_EXIT_CRITICAL_SECTION();

/* Called on xQueuePeek */
#undef traceQUEUE_PEEK
#define traceQUEUE_PEEK( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(PEEK, SUCCESS, UNUSED, pxQueue);

/* Called when a message is sent from interrupt context, e.g., using xQueueSendFromISR */
#undef traceQUEUE_SEND_FROM_ISR
#define traceQUEUE_SEND_FROM_ISR( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, SUCCESS, UNUSED, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting + 1));

/* Called when a message send from interrupt context fails (since the queue was full) */
#undef traceQUEUE_SEND_FROM_ISR_FAILED
#define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, FAILED, UNUSED, pxQueue);

/* Called when a message is received in interrupt context, e.g., using xQueueReceiveFromISR */
#undef traceQUEUE_RECEIVE_FROM_ISR
#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, SUCCESS, UNUSED, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting - 1));

/* Called when a message receive from interrupt context fails (since the queue was empty) */
#undef traceQUEUE_RECEIVE_FROM_ISR_FAILED
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, FAILED, UNUSED, pxQueue);

/* Called in vTaskPrioritySet */
#undef traceTASK_PRIORITY_SET
#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \
	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_SET, pxTask, uxNewPriority);

/* Called in vTaskPriorityInherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_INHERIT
#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \
	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_INHERIT, pxTask, uxNewPriority);

/* Called in vTaskPriorityDisinherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_DISINHERIT
#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \
	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_DISINHERIT, pxTask, uxNewPriority);

/* Called in vTaskResume */
#undef traceTASK_RESUME
#define traceTASK_RESUME( pxTaskToResume ) \
	trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME, pxTaskToResume);

/* Called in vTaskResumeFromISR */
#undef traceTASK_RESUME_FROM_ISR
#define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \
	trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME_FROM_ISR, pxTaskToResume);
+\r
+\r
+/************************************************************************/\r
+/* KERNEL SPECIFIC MACROS TO EXCLUDE OR INCLUDE THINGS IN TRACE */\r
+/************************************************************************/\r
+\r
/* Returns the exclude state of the object */
uint8_t uiTraceIsObjectExcluded(traceObjectClass objectclass, objectHandleType handle);

/* Per-class accessors for the per-object exclusion bits, stored in a single
 * bit array (excludedObjects). Each class occupies its own region; note the
 * "+1" between regions - object indices appear to be 1-based, so each region
 * reserves index 0 (NOTE(review): confirm against trcBase/trcKernel). */
#define TRACE_SET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, queueIndex)
#define TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, queueIndex)
#define TRACE_GET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, queueIndex)

#define TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)
#define TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)
#define TRACE_GET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)

#define TRACE_SET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)
#define TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)
#define TRACE_GET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)

#define TRACE_SET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)
#define TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)
#define TRACE_GET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)
+\r
/* Clears the exclude flag of the object, dispatching on its trace class.
 * TRACE_CLASS_ISR is deliberately not handled here; the explicit default
 * makes that intent visible and silences -Wswitch-default/MISRA 16.4.
 * Wrapped in do/while(0) so the macro is safe in unbraced if/else. */
#define TRACE_CLEAR_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
do { \
switch (objectclass) \
{ \
case TRACE_CLASS_QUEUE: \
	TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_SEMAPHORE: \
	TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_MUTEX: \
	TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_TASK: \
	TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(handle); \
	break; \
default: \
	break; \
} \
} while (0)
+\r
/* Sets the exclude flag of the object, dispatching on its trace class.
 * TRACE_CLASS_ISR is deliberately not handled here; the explicit default
 * makes that intent visible and silences -Wswitch-default/MISRA 16.4.
 * Wrapped in do/while(0) so the macro is safe in unbraced if/else. */
#define TRACE_SET_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
do { \
switch (objectclass) \
{ \
case TRACE_CLASS_QUEUE: \
	TRACE_SET_QUEUE_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_SEMAPHORE: \
	TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_MUTEX: \
	TRACE_SET_MUTEX_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_TASK: \
	TRACE_SET_TASK_FLAG_ISEXCLUDED(handle); \
	break; \
default: \
	break; \
} \
} while (0)
+\r
/* Task */
/* Exclude/re-include the given task from the trace; the flag is indexed by
 * the task's recorder number. */
#define vTraceExcludeTaskFromTrace(handle) \
TRACE_SET_TASK_FLAG_ISEXCLUDED(TRACE_GET_TASK_NUMBER(handle));

#define vTraceIncludeTaskInTrace(handle) \
TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(TRACE_GET_TASK_NUMBER(handle));


/* Queue */
/* Exclude/re-include the given queue from the trace; the flag is indexed by
 * the queue's recorder object number. */
#define vTraceExcludeQueueFromTrace(handle) \
TRACE_SET_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

#define vTraceIncludeQueueInTrace(handle) \
TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));
+\r
+\r
/* Semaphore */
/* Exclude/re-include the given semaphore from the trace.
 * BUGFIX: vTraceIncludeSemaphoreInTrace previously called
 * TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED (copy-paste error), clearing a flag in
 * the QUEUE region instead of the SEMAPHORE region, so a once-excluded
 * semaphore could never be re-included (and a queue's flag was corrupted). */
#define vTraceExcludeSemaphoreFromTrace(handle) \
TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

#define vTraceIncludeSemaphoreInTrace(handle) \
TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));
+\r
+\r
/* Mutex */
/* Exclude/re-include the given mutex from the trace.
 * BUGFIX: vTraceIncludeMutexInTrace previously called
 * TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED (copy-paste error), clearing a flag in
 * the QUEUE region instead of the MUTEX region, so a once-excluded mutex
 * could never be re-included (and a queue's flag was corrupted). */
#define vTraceExcludeMutexFromTrace(handle) \
TRACE_SET_MUTEX_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

#define vTraceIncludeMutexInTrace(handle) \
TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));
+\r
+\r
/* Kernel Services */
/* Exclude/re-include the vTaskDelay and vTaskDelayUntil events from the trace
 * by flagging their event codes. */
#define vTraceExcludeKernelServiceDelayFromTrace() \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY_UNTIL);

#define vTraceIncludeKernelServiceDelayInTrace() \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY_UNTIL);
+\r
+/* HELPER MACROS FOR KERNEL SERVICES FOR OBJECTS */\r
+#define vTraceExcludeKernelServiceSendFromTrace_HELPER(class) \\r
+TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_SUCCESS + class); \\r
+TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_BLOCK + class); \\r
+TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FAILED + class); \\r
+TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_SUCCESS + class); \\r
+TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_FAILED + class);\r
+\r
+#define vTraceIncludeKernelServiceSendInTrace_HELPER(class) \\r
+TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_SUCCESS + class); \\r
+TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_BLOCK + class); \\r
+TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FAILED + class); \\r
+TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_SUCCESS + class); \\r
+TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_FAILED + class);\r
+\r
+#define vTraceExcludeKernelServiceReceiveFromTrace_HELPER(class) \\r
+TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_SUCCESS + class); \\r
+TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_BLOCK + class); \\r
+TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FAILED + class); \\r
+TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + class); \\r
+TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + class);\r
+\r
+#define vTraceIncludeKernelServiceReceiveInTrace_HELPER(class) \\r
+TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_SUCCESS + class); \\r
+TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_BLOCK + class); \\r
+TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FAILED + class); \\r
+TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + class); \\r
+TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + class);\r
+\r
+/* EXCLUDE AND INCLUDE FOR QUEUE */
+/* Thin wrappers that bind the generic send/receive helpers to the queue
+ * object class. */
+#define vTraceExcludeKernelServiceQueueSendFromTrace() vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_QUEUE);
+
+#define vTraceIncludeKernelServiceQueueSendInTrace() vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_QUEUE);
+
+#define vTraceExcludeKernelServiceQueueReceiveFromTrace() vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_QUEUE);
+
+#define vTraceIncludeKernelServiceQueueReceiveInTrace() vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_QUEUE);
+\r
+/* EXCLUDE AND INCLUDE FOR SEMAPHORE */
+/* Thin wrappers that bind the generic send/receive helpers to the semaphore
+ * object class. */
+#define vTraceExcludeKernelServiceSemaphoreSendFromTrace() \
+vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_SEMAPHORE);
+
+/* NOTE: the name below has a typo ("Servic" instead of "Service"). It is
+ * kept as-is for backward compatibility; prefer the correctly-spelled alias
+ * defined after it. */
+#define vTraceIncludeKernelServicSemaphoreSendInTrace() \
+vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_SEMAPHORE);
+
+/* Correctly-spelled alias for the typo'd macro above. */
+#define vTraceIncludeKernelServiceSemaphoreSendInTrace() \
+vTraceIncludeKernelServicSemaphoreSendInTrace()
+
+#define vTraceExcludeKernelServiceSemaphoreReceiveFromTrace() \
+vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_SEMAPHORE);
+
+#define vTraceIncludeKernelServiceSemaphoreReceiveInTrace() \
+vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_SEMAPHORE);
+\r
+/* EXCLUDE AND INCLUDE FOR MUTEX */
+/* Thin wrappers that bind the generic send/receive helpers to the mutex
+ * object class. */
+#define vTraceExcludeKernelServiceMutexSendFromTrace() vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_MUTEX);
+
+#define vTraceIncludeKernelServiceMutexSendInTrace() vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_MUTEX);
+
+#define vTraceExcludeKernelServiceMutexReceiveFromTrace() vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_MUTEX);
+
+#define vTraceIncludeKernelServiceMutexReceiveInTrace() vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_MUTEX);
+\r
+/************************************************************************/
+/* KERNEL SPECIFIC MACROS TO NAME OBJECTS, IF NECESSARY */
+/************************************************************************/
+/* All three macros forward to vTraceSetObjectName; the trace class and the
+ * object number are derived from the object handle. The first argument of
+ * the TRACE_GET_* macros is unused in this port (UNUSED placeholder).
+ * NOTE(review): queues, semaphores and mutexes all resolve through the same
+ * object lookup — presumably because all three are queue-based in FreeRTOS;
+ * confirm against TraceObjectClassTable. */
+#define vTraceSetQueueName(object, name) \
+vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);
+
+#define vTraceSetSemaphoreName(object, name) \
+vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);
+
+#define vTraceSetMutexName(object, name) \
+vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);
+\r
+#endif\r
+\r
+#endif /* TRCKERNELPORT_H_ */
\ No newline at end of file
if (nISRactive || !inExcludedTask)\r
{\r
/* Check if the referenced object or the event code is excluded */\r
- if (!uiTraceIsObjectExcluded(objectClass, objectNumber) && !TRACE_GET_EVENT_CODE_FLAG_ISEXCLUDED(ecode))\r
+ if (!uiTraceIsObjectExcluded(objectClass, (objectHandleType)objectNumber) && !TRACE_GET_EVENT_CODE_FLAG_ISEXCLUDED(ecode))\r
{\r
trcCRITICAL_SECTION_BEGIN();\r
dts1 = (uint16_t)prvTraceGetDTS(0xFFFF);\r
}\r
\r
/* Check if the referenced object or the event code is excluded */\r
- if (!uiTraceIsObjectExcluded(objectClass, objectNumber) && !TRACE_GET_EVENT_CODE_FLAG_ISEXCLUDED(evtcode))\r
+ if (!uiTraceIsObjectExcluded(objectClass, (objectHandleType)objectNumber) && !TRACE_GET_EVENT_CODE_FLAG_ISEXCLUDED(evtcode))\r
{\r
trcCRITICAL_SECTION_BEGIN();\r
dts2 = (uint8_t)prvTraceGetDTS(0xFF);\r
+++ /dev/null
-/*******************************************************************************\r
- * Tracealyzer v2.4.1 Recorder Library\r
- * Percepio AB, www.percepio.com\r
- *\r
- * trcKernelPort.h\r
- *\r
- * Kernel-specific functionality for FreeRTOS, used by the recorder library.\r
- * \r
- * Terms of Use\r
- * This software is copyright Percepio AB. The recorder library is free for\r
- * use together with Percepio products. You may distribute the recorder library\r
- * in its original form, including modifications in trcHardwarePort.c/.h\r
- * given that these modification are clearly marked as your own modifications\r
- * and documented in the initial comment section of these source files. \r
- * This software is the intellectual property of Percepio AB and may not be \r
- * sold or in other ways commercially redistributed without explicit written \r
- * permission by Percepio AB.\r
- *\r
- * Disclaimer \r
- * The trace tool and recorder library is being delivered to you AS IS and \r
- * Percepio AB makes no warranty as to its use or performance. Percepio AB does \r
- * not and cannot warrant the performance or results you may obtain by using the \r
- * software or documentation. Percepio AB make no warranties, express or \r
- * implied, as to noninfringement of third party rights, merchantability, or \r
- * fitness for any particular purpose. In no event will Percepio AB, its \r
- * technology partners, or distributors be liable to you for any consequential, \r
- * incidental or special damages, including any lost profits or lost savings, \r
- * even if a representative of Percepio AB has been advised of the possibility \r
- * of such damages, or for any claim by any third party. Some jurisdictions do \r
- * not allow the exclusion or limitation of incidental, consequential or special \r
- * damages, or the exclusion of implied warranties or limitations on how long an \r
- * implied warranty may last, so the above limitations may not apply to you.\r
- *\r
- * Copyright Percepio AB, 2013.\r
- * www.percepio.com\r
- ******************************************************************************/\r
-\r
-\r
-#ifndef TRCKERNELPORT_H_\r
-#define TRCKERNELPORT_H_\r
-\r
-#include "FreeRTOS.h" // Defines configUSE_TRACE_FACILITY\r
-\r
-#define USE_TRACEALYZER_RECORDER configUSE_TRACE_FACILITY\r
-\r
-#if (USE_TRACEALYZER_RECORDER == 1)\r
-\r
-/* Defines that must be set for the recorder to work properly */\r
-#define TRACE_KERNEL_VERSION 0x1AA1\r
-#define TRACE_CPU_CLOCK_HZ configCPU_CLOCK_HZ /* Defined in "FreeRTOS.h" */\r
-#define TRACE_PERIPHERAL_CLOCK_HZ configPERIPHERAL_CLOCK_HZ /* Defined in "FreeRTOS.h" */\r
-#define TRACE_TICK_RATE_HZ configTICK_RATE_HZ /* Defined in "FreeRTOS.h" */\r
-#define TRACE_CPU_CLOCKS_PER_TICK configCPU_CLOCKS_PER_TICK /* Defined in "FreeRTOS.h" */\r
-\r
-/************************************************************************/\r
-/* KERNEL SPECIFIC OBJECT CONFIGURATION */\r
-/************************************************************************/\r
-#define TRACE_NCLASSES 5\r
-#define TRACE_CLASS_QUEUE ((traceObjectClass)0)\r
-#define TRACE_CLASS_SEMAPHORE ((traceObjectClass)1)\r
-#define TRACE_CLASS_MUTEX ((traceObjectClass)2)\r
-#define TRACE_CLASS_TASK ((traceObjectClass)3)\r
-#define TRACE_CLASS_ISR ((traceObjectClass)4)\r
-\r
-#define TRACE_KERNEL_OBJECT_COUNT (NQueue + NSemaphore + NMutex + NTask + NISR)\r
-\r
-/* The size of the Object Property Table entries, in bytes, per object */\r
-\r
-/* Queue properties (except name): current number of message in queue */\r
-#define PropertyTableSizeQueue (NameLenQueue + 1) \r
-\r
-/* Semaphore properties (except name): state (signaled = 1, cleared = 0) */\r
-#define PropertyTableSizeSemaphore (NameLenSemaphore + 1) \r
-\r
-/* Mutex properties (except name): owner (task handle, 0 = free) */\r
-#define PropertyTableSizeMutex (NameLenMutex + 1) \r
-\r
-/* Task properties (except name): Byte 0: Current priority\r
- Byte 1: state (if already active) \r
- Byte 2: legacy, not used\r
- Byte 3: legacy, not used */\r
-#define PropertyTableSizeTask (NameLenTask + 4)\r
-\r
-/* ISR properties: Byte 0: priority\r
- Byte 1: state (if already active) */\r
-#define PropertyTableSizeISR (NameLenISR + 2)\r
-\r
-/* The layout of the byte array representing the Object Property Table */\r
-#define StartIndexQueue 0\r
-#define StartIndexSemaphore StartIndexQueue + NQueue * PropertyTableSizeQueue\r
-#define StartIndexMutex StartIndexSemaphore + NSemaphore * PropertyTableSizeSemaphore\r
-#define StartIndexTask StartIndexMutex + NMutex * PropertyTableSizeMutex\r
-#define StartIndexISR StartIndexTask + NTask * PropertyTableSizeTask\r
-\r
-/* Number of bytes used by the object table */\r
-#define TRACE_OBJECT_TABLE_SIZE StartIndexISR + NISR * PropertyTableSizeISR\r
-\r
-\r
-/* Includes */\r
-#include "trcTypes.h"\r
-#include "trcConfig.h"\r
-#include "trcKernelHooks.h"\r
-#include "trcHardwarePort.h"\r
-#include "trcBase.h"\r
-#include "trcKernel.h"\r
-#include "trcUser.h"\r
-\r
-/* Initialization of the object property table */\r
-void vTraceInitObjectPropertyTable(void);\r
-\r
-/* Initialization of the handle mechanism, see e.g, xTraceGetObjectHandle */\r
-void vTraceInitObjectHandleStack(void);\r
-\r
-/* Returns the "Not enough handles" error message for the specified object class */\r
-const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);\r
-\r
-/*******************************************************************************\r
- * The event codes - should match the offline config file.\r
- * \r
- * Some sections below are encoded to allow for constructions like:\r
- *\r
- * vTraceStoreKernelCall(EVENTGROUP_CREATE + objectclass, ...\r
- *\r
- * The object class ID is given by the three LSB bits, in such cases. Since each \r
- * object class has a separate object property table, the class ID is needed to \r
- * know what section in the object table to use for getting an object name from\r
- * an object handle. \r
- ******************************************************************************/\r
-\r
-#define NULL_EVENT (0x00) /* Ignored in the analysis*/\r
-\r
-/*******************************************************************************\r
- * EVENTGROUP_DIV\r
- *\r
- * Miscellaneous events.\r
- ******************************************************************************/\r
-#define EVENTGROUP_DIV (NULL_EVENT + 1) /*0x01*/\r
-#define DIV_XPS (EVENTGROUP_DIV + 0) /*0x01*/\r
-#define DIV_TASK_READY (EVENTGROUP_DIV + 1) /*0x02*/\r
-#define DIV_NEW_TIME (EVENTGROUP_DIV + 2) /*0x03*/\r
-\r
-/*******************************************************************************\r
- * EVENTGROUP_TS\r
- *\r
- * Events for storing task-switches and interrupts. The RESUME events are \r
- * generated if the task/interrupt is already marked active.\r
- ******************************************************************************/\r
-#define EVENTGROUP_TS (EVENTGROUP_DIV + 3) /*0x04*/\r
-#define TS_ISR_BEGIN (EVENTGROUP_TS + 0) /*0x04*/\r
-#define TS_ISR_RESUME (EVENTGROUP_TS + 1) /*0x05*/\r
-#define TS_TASK_BEGIN (EVENTGROUP_TS + 2) /*0x06*/\r
-#define TS_TASK_RESUME (EVENTGROUP_TS + 3) /*0x07*/\r
-\r
-/*******************************************************************************\r
- * EVENTGROUP_OBJCLOSE_NAME\r
- * \r
- * About Close Events\r
- * When an object is evicted from the object property table (object close), two \r
- * internal events are stored (EVENTGROUP_OBJCLOSE_NAME and \r
- * EVENTGROUP_OBJCLOSE_PROP), containing the handle-name mapping and object \r
- * properties valid up to this point.\r
- ******************************************************************************/\r
-#define EVENTGROUP_OBJCLOSE_NAME (EVENTGROUP_TS + 4) /*0x08*/\r
-\r
-/*******************************************************************************\r
- * EVENTGROUP_OBJCLOSE_PROP\r
- * \r
- * The internal event carrying properties of deleted objects\r
- * The handle and object class of the closed object is not stored in this event, \r
- * but is assumed to be the same as in the preceding CLOSE event. Thus, these \r
- * two events must be generated from within a critical section. \r
- * When queues are closed, arg1 is the "state" property (i.e., number of \r
- * buffered messages/signals).\r
- * When actors are closed, arg1 is priority, arg2 is handle of the "instance \r
- * finish" event, and arg3 is event code of the "instance finish" event. \r
- * In this case, the lower three bits is the object class of the instance finish \r
- * handle. The lower three bits are not used (always zero) when queues are \r
- * closed since the queue type is given in the previous OBJCLOSE_NAME event.\r
- ******************************************************************************/\r
-#define EVENTGROUP_OBJCLOSE_PROP (EVENTGROUP_OBJCLOSE_NAME + 8) /*0x10*/\r
-\r
-/*******************************************************************************\r
- * EVENTGROUP_CREATE\r
- * \r
- * The events in this group are used to log Kernel object creations.\r
- * The lower three bits in the event code gives the object class, i.e., type of\r
- * create operation (task, queue, semaphore, etc).\r
- ******************************************************************************/\r
-#define EVENTGROUP_CREATE_SUCCESS (EVENTGROUP_OBJCLOSE_PROP + 8) /*0x18*/\r
-\r
-/*******************************************************************************\r
- * EVENTGROUP_SEND\r
- * \r
- * The events in this group are used to log Send/Give events on queues, \r
- * semaphores and mutexes The lower three bits in the event code gives the \r
- * object class, i.e., what type of object that is operated on (queue, semaphore \r
- * or mutex).\r
- ******************************************************************************/\r
-#define EVENTGROUP_SEND_SUCCESS (EVENTGROUP_CREATE_SUCCESS + 8) /*0x20*/\r
-\r
-/*******************************************************************************\r
- * EVENTGROUP_RECEIVE\r
- * \r
- * The events in this group are used to log Receive/Take events on queues, \r
- * semaphores and mutexes. The lower three bits in the event code gives the \r
- * object class, i.e., what type of object that is operated on (queue, semaphore\r
- * or mutex).\r
- ******************************************************************************/\r
-#define EVENTGROUP_RECEIVE_SUCCESS (EVENTGROUP_SEND_SUCCESS + 8) /*0x28*/\r
-\r
-/* Send/Give operations, from ISR */\r
-#define EVENTGROUP_SEND_FROM_ISR_SUCCESS (EVENTGROUP_RECEIVE_SUCCESS + 8) /*0x30*/\r
-\r
-/* Receive/Take operations, from ISR */\r
-#define EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS (EVENTGROUP_SEND_FROM_ISR_SUCCESS + 8) /*0x38*/\r
-\r
-/* "Failed" event type versions of above (timeout, failed allocation, etc) */\r
-#define EVENTGROUP_KSE_FAILED (EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + 8) /*0x40*/\r
-\r
-/* Failed create calls - memory allocation failed */\r
-#define EVENTGROUP_CREATE_FAILED (EVENTGROUP_KSE_FAILED) /*0x40*/\r
-\r
-/* Failed send/give - timeout! */\r
-#define EVENTGROUP_SEND_FAILED (EVENTGROUP_CREATE_FAILED + 8) /*0x48*/\r
-\r
-/* Failed receive/take - timeout! */\r
-#define EVENTGROUP_RECEIVE_FAILED (EVENTGROUP_SEND_FAILED + 8) /*0x50*/\r
-\r
-/* Failed non-blocking send/give - queue full */\r
-#define EVENTGROUP_SEND_FROM_ISR_FAILED (EVENTGROUP_RECEIVE_FAILED + 8) /*0x58*/\r
-\r
-/* Failed non-blocking receive/take - queue empty */\r
-#define EVENTGROUP_RECEIVE_FROM_ISR_FAILED \\r
- (EVENTGROUP_SEND_FROM_ISR_FAILED + 8) /*0x60*/\r
-\r
-/* Events when blocking on receive/take */\r
-#define EVENTGROUP_RECEIVE_BLOCK \\r
- (EVENTGROUP_RECEIVE_FROM_ISR_FAILED + 8) /*0x68*/\r
-\r
-/* Events when blocking on send/give */\r
-#define EVENTGROUP_SEND_BLOCK (EVENTGROUP_RECEIVE_BLOCK + 8) /*0x70*/\r
-\r
-/* Events on queue peek (receive) */\r
-#define EVENTGROUP_PEEK_SUCCESS (EVENTGROUP_SEND_BLOCK + 8) /*0x78*/\r
-\r
-/* Events on object delete (vTaskDelete or vQueueDelete) */\r
-#define EVENTGROUP_DELETE_SUCCESS (EVENTGROUP_PEEK_SUCCESS + 8) /*0x80*/\r
-\r
-/* Other events - object class is implied: TASK */\r
-#define EVENTGROUP_OTHERS (EVENTGROUP_DELETE_SUCCESS + 8) /*0x88*/\r
-#define TASK_DELAY_UNTIL (EVENTGROUP_OTHERS + 0) /*0x88*/\r
-#define TASK_DELAY (EVENTGROUP_OTHERS + 1) /*0x89*/\r
-#define TASK_SUSPEND (EVENTGROUP_OTHERS + 2) /*0x8A*/\r
-#define TASK_RESUME (EVENTGROUP_OTHERS + 3) /*0x8B*/\r
-#define TASK_RESUME_FROM_ISR (EVENTGROUP_OTHERS + 4) /*0x8C*/\r
-#define TASK_PRIORITY_SET (EVENTGROUP_OTHERS + 5) /*0x8D*/\r
-#define TASK_PRIORITY_INHERIT (EVENTGROUP_OTHERS + 6) /*0x8E*/\r
-#define TASK_PRIORITY_DISINHERIT (EVENTGROUP_OTHERS + 7) /*0x8F*/\r
-\r
-/* Not yet used */\r
-#define EVENTGROUP_FTRACE_PLACEHOLDER (EVENTGROUP_OTHERS + 8) /*0x90*/\r
-\r
-/* User events */\r
-#define EVENTGROUP_USEREVENT (EVENTGROUP_FTRACE_PLACEHOLDER + 8) /*0x98*/\r
-#define USER_EVENT (EVENTGROUP_USEREVENT + 0)\r
-\r
-/* Allow for 0-15 arguments (the number of args is added to event code) */\r
-#define USER_EVENT_LAST (EVENTGROUP_USEREVENT + 15) /*0xA7*/\r
-\r
-/*******************************************************************************\r
- * XTS Event - eXtended TimeStamp events\r
- * The timestamps used in the recorder are "differential timestamps" (DTS), i.e.\r
- * the time since the last stored event. The DTS fields are either 1 or 2 bytes \r
- * in the other events, depending on the bytes available in the event struct. \r
- * If the time since the last event (the DTS) is larger than allowed for by \r
- * the DTS field of the current event, an XTS event is inserted immediately \r
- * before the original event. The XTS event contains up to 3 additional bytes \r
- * of the DTS value - the higher bytes of the true DTS value. The lower 1-2 \r
- * bytes are stored in the normal DTS field. \r
- * There are two types of XTS events, XTS8 and XTS16. An XTS8 event is stored \r
- * when there is only room for 1 byte (8 bit) DTS data in the original event, \r
- * which means a limit of 0xFF (255). The XTS16 is used when the original event \r
- * has a 16 bit DTS field and thereby can handle values up to 0xFFFF (65535).\r
- * \r
- * Using a very high frequency time base can result in many XTS events. \r
- * Preferably, the time between two OS ticks should fit in 16 bits, i.e.,\r
- * at most 65535. If your time base has a higher frequency, you can define\r
- * the TRACE\r
- ******************************************************************************/\r
-\r
-#define EVENTGROUP_SYS (EVENTGROUP_USEREVENT + 16) /*0xA8*/\r
-#define XTS8 (EVENTGROUP_SYS + 0) /*0xA8*/\r
-#define XTS16 (EVENTGROUP_SYS + 1) /*0xA9*/\r
-\r
-#define EVENT_BEING_WRITTEN (EVENTGROUP_SYS + 2) /*0xAA*/\r
-\r
-#define RESERVED_DUMMY_CODE (EVENTGROUP_SYS + 3) /*0xAB*/\r
-\r
-\r
-\r
-/************************************************************************/\r
-/* KERNEL SPECIFIC DATA AND FUNCTIONS NEEDED TO PROVIDE THE */\r
-/* FUNCTIONALITY REQUESTED BY THE TRACE RECORDER */\r
-/************************************************************************/\r
-\r
-/******************************************************************************\r
- * TraceObjectClassTable\r
- * Translates a FreeRTOS QueueType into trace objects classes (TRACE_CLASS_).\r
- * This was added since we want to map both types of Mutex and both types of \r
- * Semaphores on common classes for all Mutexes and all Semaphores respectively. \r
- * \r
- * FreeRTOS Queue types\r
- * #define queueQUEUE_TYPE_BASE (0U) => TRACE_CLASS_QUEUE\r
- * #define queueQUEUE_TYPE_MUTEX (1U) => TRACE_CLASS_MUTEX\r
- * #define queueQUEUE_TYPE_COUNTING_SEMAPHORE (2U) => TRACE_CLASS_SEMAPHORE\r
- * #define queueQUEUE_TYPE_BINARY_SEMAPHORE (3U) => TRACE_CLASS_SEMAPHORE\r
- * #define queueQUEUE_TYPE_RECURSIVE_MUTEX (4U) => TRACE_CLASS_MUTEX \r
- ******************************************************************************/\r
-\r
-extern traceObjectClass TraceObjectClassTable[5];\r
-\r
-/* These functions are implemented in the .c file since certain header files must not be included in this one */\r
-objectHandleType prvTraceGetObjectNumber(void* handle);\r
-unsigned char prvTraceGetObjectType(void* handle);\r
-objectHandleType prvTraceGetTaskNumber(void* handle);\r
-unsigned char prvTraceIsSchedulerActive(void);\r
-unsigned char prvTraceIsSchedulerSuspended(void);\r
-unsigned char prvTraceIsSchedulerStarted(void);\r
-void prvTraceEnterCritical(void);\r
-void prvTraceExitCritical(void);\r
-void* prvTraceGetCurrentTaskHandle(void);\r
-\r
-\r
-/************************************************************************/\r
-/* KERNEL SPECIFIC MACROS USED BY THE TRACE RECORDER */\r
-/************************************************************************/\r
-\r
-#define TRACE_MALLOC(size) pvPortMalloc(size)\r
-\r
-#define TRACE_ENTER_CRITICAL_SECTION() prvTraceEnterCritical();\r
-#define TRACE_EXIT_CRITICAL_SECTION() prvTraceExitCritical();\r
-\r
-#define TRACE_IS_SCHEDULER_ACTIVE() prvTraceIsSchedulerActive()\r
-#define TRACE_IS_SCHEDULER_STARTED() prvTraceIsSchedulerStarted()\r
-#define TRACE_IS_SCHEDULER_SUSPENDED() prvTraceIsSchedulerSuspended()\r
-#define TRACE_GET_CURRENT_TASK() prvTraceGetCurrentTaskHandle()\r
-\r
-#define TRACE_GET_TASK_PRIORITY(pxTCB) ((uint8_t)pxTCB->uxPriority)\r
-#define TRACE_GET_TASK_NAME(pxTCB) ((char*)pxTCB->pcTaskName)\r
-#define TRACE_GET_TASK_NUMBER(pxTCB) (prvTraceGetTaskNumber(pxTCB))\r
-#define TRACE_SET_TASK_NUMBER(pxTCB) pxTCB->uxTaskNumber = xTraceGetObjectHandle(TRACE_CLASS_TASK);\r
-\r
-#define TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass) TraceObjectClassTable[kernelClass]\r
-#define TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject) TRACE_GET_CLASS_TRACE_CLASS(CLASS, prvTraceGetObjectType(pxObject))\r
-\r
-#define TRACE_GET_OBJECT_NUMBER(CLASS, pxObject) (prvTraceGetObjectNumber(pxObject))\r
-#define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) pxObject->ucQueueNumber = xTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject));\r
-\r
-#define TRACE_GET_CLASS_EVENT_CODE(SERVICE, RESULT, CLASS, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass))\r
-#define TRACE_GET_OBJECT_EVENT_CODE(SERVICE, RESULT, CLASS, pxObject) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject))\r
-#define TRACE_GET_TASK_EVENT_CODE(SERVICE, RESULT, CLASS, pxTCB) (EVENTGROUP_##SERVICE##_##RESULT + TRACE_CLASS_TASK)\r
-\r
-\r
-\r
-/************************************************************************/\r
-/* KERNEL SPECIFIC WRAPPERS THAT SHOULD BE CALLED BY THE KERNEL */\r
-/************************************************************************/\r
-\r
-/* Called for each task that becomes ready */\r
-#undef traceMOVED_TASK_TO_READY_STATE\r
-#define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \\r
- trcKERNEL_HOOKS_MOVED_TASK_TO_READY_STATE(pxTCB);\r
-\r
-/* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. */\r
-#undef traceTASK_INCREMENT_TICK\r
-#define traceTASK_INCREMENT_TICK( xTickCount ) \\r
- if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxMissedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \\r
- if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); }\r
-\r
-/* Called on each task-switch */\r
-#undef traceTASK_SWITCHED_IN\r
-#define traceTASK_SWITCHED_IN() \\r
- trcKERNEL_HOOKS_TASK_SWITCH(TRACE_GET_CURRENT_TASK());\r
-\r
-/* Called on vTaskSuspend */\r
-#undef traceTASK_SUSPEND\r
-#define traceTASK_SUSPEND( pxTaskToSuspend ) \\r
- trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend);\r
-\r
-/* Called on vTaskDelay - note the use of FreeRTOS variable xTicksToDelay */\r
-#undef traceTASK_DELAY\r
-#define traceTASK_DELAY() \\r
- TRACE_ENTER_CRITICAL_SECTION(); \\r
- trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY, pxCurrentTCB, xTicksToDelay); \\r
- trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(UNUSED,pxCurrentTCB); \\r
- TRACE_EXIT_CRITICAL_SECTION();\r
-\r
-/* Called on vTaskDelayUntil - note the use of FreeRTOS variable xTimeToWake */\r
-#undef traceTASK_DELAY_UNTIL\r
-#define traceTASK_DELAY_UNTIL() \\r
- TRACE_ENTER_CRITICAL_SECTION(); \\r
- trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY_UNTIL, pxCurrentTCB, xTimeToWake); \\r
- trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(UNUSED,pxCurrentTCB); \\r
- TRACE_EXIT_CRITICAL_SECTION();\r
-\r
-#if (INCLUDE_OBJECT_DELETE == 1)\r
-/* Called on vTaskDelete */\r
-#undef traceTASK_DELETE\r
-#define traceTASK_DELETE( pxTaskToDelete ) \\r
- trcKERNEL_HOOKS_TASK_DELETE(DELETE, pxTaskToDelete);\r
-#endif\r
-\r
-#if (INCLUDE_OBJECT_DELETE == 1)\r
-/* Called on vQueueDelete */\r
-#undef traceQUEUE_DELETE\r
-#define traceQUEUE_DELETE( pxQueue ) \\r
- TRACE_ENTER_CRITICAL_SECTION(); \\r
- trcKERNEL_HOOKS_OBJECT_DELETE(DELETE, UNUSED, pxQueue); \\r
- TRACE_EXIT_CRITICAL_SECTION();\r
-#endif\r
-\r
-/* Called on vTaskCreate */\r
-#undef traceTASK_CREATE\r
-#define traceTASK_CREATE(pxNewTCB) \\r
- if (pxNewTCB != NULL) \\r
- { \\r
- trcKERNEL_HOOKS_TASK_CREATE(CREATE, pxNewTCB); \\r
- }\r
-\r
-/* Called in vTaskCreate, if it fails (typically if the stack can not be allocated) */\r
-#undef traceTASK_CREATE_FAILED\r
-#define traceTASK_CREATE_FAILED() \\r
- TRACE_ENTER_CRITICAL_SECTION(); \\r
- trcKERNEL_HOOKS_TASK_CREATE_FAILED(CREATE); \\r
- TRACE_EXIT_CRITICAL_SECTION();\r
-\r
-/* Called in xQueueCreate, and thereby for all other object based on queues, such as semaphores. */\r
-#undef traceQUEUE_CREATE\r
-#define traceQUEUE_CREATE( pxNewQueue )\\r
- TRACE_ENTER_CRITICAL_SECTION(); \\r
- trcKERNEL_HOOKS_OBJECT_CREATE(CREATE, UNUSED, pxNewQueue); \\r
- TRACE_EXIT_CRITICAL_SECTION();\r
-\r
-/* Called in xQueueCreate, if the queue creation fails */\r
-#undef traceQUEUE_CREATE_FAILED\r
-#define traceQUEUE_CREATE_FAILED( queueType ) \\r
- TRACE_ENTER_CRITICAL_SECTION(); \\r
- trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE, UNUSED, queueType); \\r
- TRACE_EXIT_CRITICAL_SECTION();\r
-\r
-/* Called in xQueueCreateMutex, and thereby also from xSemaphoreCreateMutex and xSemaphoreCreateRecursiveMutex */\r
-#undef traceCREATE_MUTEX\r
-#define traceCREATE_MUTEX( pxNewQueue ) \\r
- TRACE_ENTER_CRITICAL_SECTION(); \\r
- trcKERNEL_HOOKS_OBJECT_CREATE(CREATE, UNUSED, pxNewQueue); \\r
- TRACE_EXIT_CRITICAL_SECTION();\r
-\r
-/* Called in xQueueCreateMutex when the operation fails (when memory allocation fails) */\r
-#undef traceCREATE_MUTEX_FAILED\r
-#define traceCREATE_MUTEX_FAILED() \\r
- TRACE_ENTER_CRITICAL_SECTION(); \\r
- trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE, UNUSED, queueQUEUE_TYPE_MUTEX); \\r
- TRACE_EXIT_CRITICAL_SECTION();\r
-\r
-/* Called when the Mutex can not be given, since not holder */\r
-#undef traceGIVE_MUTEX_RECURSIVE_FAILED\r
-#define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ) \\r
- TRACE_ENTER_CRITICAL_SECTION(); \\r
- trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxMutex); \\r
- TRACE_EXIT_CRITICAL_SECTION();\r
-\r
-/* Called when a message is sent to a queue */\r
-#undef traceQUEUE_SEND\r
-#define traceQUEUE_SEND( pxQueue ) \\r
- trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, SUCCESS, UNUSED, pxQueue); \\r
- trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? (uint8_t)0 : (uint8_t)(pxQueue->uxMessagesWaiting + 1)); /*For mutex, store the new owner rather than queue length */\r
-\r
-/* Called when a message failed to be sent to a queue (timeout) */\r
-#undef traceQUEUE_SEND_FAILED\r
-#define traceQUEUE_SEND_FAILED( pxQueue ) \\r
- TRACE_ENTER_CRITICAL_SECTION();\\r
- trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxQueue); \\r
- TRACE_EXIT_CRITICAL_SECTION();\r
-\r
-/* Called when the task is blocked due to a send operation on a full queue */\r
-#undef traceBLOCKING_ON_QUEUE_SEND\r
-#define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \\r
- TRACE_ENTER_CRITICAL_SECTION();\\r
- trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, BLOCK, UNUSED, pxQueue); \\r
- TRACE_EXIT_CRITICAL_SECTION();\r
-\r
-/* Called when a message is received from a queue */\r
-#undef traceQUEUE_RECEIVE\r
-#define traceQUEUE_RECEIVE( pxQueue ) \\r
- trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, SUCCESS, UNUSED, pxQueue); \\r
- trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? TRACE_GET_TASK_NUMBER(TRACE_GET_CURRENT_TASK()) : (uint8_t)(pxQueue->uxMessagesWaiting - 1)); /*For mutex, store the new owner rather than queue length */\r
-\r
-/* Called when a receive operation on a queue fails (timeout) */\r
-#undef traceQUEUE_RECEIVE_FAILED\r
-#define traceQUEUE_RECEIVE_FAILED( pxQueue ) \\r
- TRACE_ENTER_CRITICAL_SECTION(); \\r
- trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, FAILED, UNUSED, pxQueue); \\r
- TRACE_EXIT_CRITICAL_SECTION();\r
-\r
-/* Called when the task is blocked due to a receive operation on an empty queue */\r
-#undef traceBLOCKING_ON_QUEUE_RECEIVE\r
-#define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \\r
- TRACE_ENTER_CRITICAL_SECTION(); \\r
- trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, BLOCK, UNUSED, pxQueue); \\r
- if (TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) != TRACE_CLASS_MUTEX) \\r
- { \\r
- trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(UNUSED, pxQueue); \\r
- } \\r
- TRACE_EXIT_CRITICAL_SECTION();\r
-\r
-/* Called on xQueuePeek */\r
-#undef traceQUEUE_PEEK\r
-#define traceQUEUE_PEEK( pxQueue ) \\r
- trcKERNEL_HOOKS_KERNEL_SERVICE(PEEK, SUCCESS, UNUSED, pxQueue);\r
-\r
-/* Called when a message is sent from interrupt context, e.g., using xQueueSendFromISR */\r
-#undef traceQUEUE_SEND_FROM_ISR\r
-#define traceQUEUE_SEND_FROM_ISR( pxQueue ) \\r
- trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, SUCCESS, UNUSED, pxQueue); \\r
- trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting + 1));\r
-\r
-/* Called when a message send from interrupt context fails (since the queue was full) */\r
-#undef traceQUEUE_SEND_FROM_ISR_FAILED\r
-#define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \\r
- trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, FAILED, UNUSED, pxQueue);\r
-\r
-/* Called when a message is received in interrupt context, e.g., using xQueueReceiveFromISR */\r
-#undef traceQUEUE_RECEIVE_FROM_ISR\r
-#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \\r
- trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, SUCCESS, UNUSED, pxQueue); \\r
- trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting - 1));\r
-\r
-/* Called when a message receive from interrupt context fails (since the queue was empty) */\r
-#undef traceQUEUE_RECEIVE_FROM_ISR_FAILED\r
-#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \\r
- trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, FAILED, UNUSED, pxQueue);\r
-\r
-/* Called in vTaskPrioritySet */\r
-#undef traceTASK_PRIORITY_SET\r
-#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \\r
- trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_SET, pxTask, uxNewPriority);\r
-\r
-/* Called in vTaskPriorityInherit, which is called by Mutex operations */\r
-#undef traceTASK_PRIORITY_INHERIT\r
-#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \\r
- trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_INHERIT, pxTask, uxNewPriority);\r
-\r
-/* Called in vTaskPriorityDisinherit, which is called by Mutex operations */\r
-#undef traceTASK_PRIORITY_DISINHERIT\r
-#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \\r
- trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_DISINHERIT, pxTask, uxNewPriority);\r
-\r
-/* Called in vTaskResume */\r
-#undef traceTASK_RESUME\r
-#define traceTASK_RESUME( pxTaskToResume ) \\r
- trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME, pxTaskToResume);\r
-\r
-/* Called in vTaskResumeFromISR */\r
-#undef traceTASK_RESUME_FROM_ISR\r
-#define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \\r
- trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME_FROM_ISR, pxTaskToResume);\r
-\r
-\r
-/************************************************************************/\r
-/* KERNEL SPECIFIC MACROS TO EXCLUDE OR INCLUDE THINGS IN TRACE */\r
-/************************************************************************/\r
-\r
-/* Returns the exclude state of the object */\r
-uint8_t uiTraceIsObjectExcluded(traceObjectClass objectclass, objectHandleType handle);\r
-\r
-#define TRACE_SET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, queueIndex)\r
-#define TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, queueIndex)\r
-#define TRACE_GET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, queueIndex)\r
-\r
-#define TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)\r
-#define TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)\r
-#define TRACE_GET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)\r
-\r
-#define TRACE_SET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)\r
-#define TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)\r
-#define TRACE_GET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)\r
-\r
-#define TRACE_SET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)\r
-#define TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)\r
-#define TRACE_GET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)\r
-\r
-#define TRACE_CLEAR_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \\r
-switch (objectclass) \\r
-{ \\r
-case TRACE_CLASS_QUEUE: \\r
- TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(handle); \\r
- break; \\r
-case TRACE_CLASS_SEMAPHORE: \\r
- TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(handle); \\r
- break; \\r
-case TRACE_CLASS_MUTEX: \\r
- TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(handle); \\r
- break; \\r
-case TRACE_CLASS_TASK: \\r
- TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(handle); \\r
- break; \\r
-}\r
-\r
-#define TRACE_SET_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \\r
-switch (objectclass) \\r
-{ \\r
-case TRACE_CLASS_QUEUE: \\r
- TRACE_SET_QUEUE_FLAG_ISEXCLUDED(handle); \\r
- break; \\r
-case TRACE_CLASS_SEMAPHORE: \\r
- TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(handle); \\r
- break; \\r
-case TRACE_CLASS_MUTEX: \\r
- TRACE_SET_MUTEX_FLAG_ISEXCLUDED(handle); \\r
- break; \\r
-case TRACE_CLASS_TASK: \\r
- TRACE_SET_TASK_FLAG_ISEXCLUDED(handle); \\r
- break; \\r
-}\r
-\r
-/* Task */\r
-#define vTraceExcludeTaskFromTrace(handle) \\r
-TRACE_SET_TASK_FLAG_ISEXCLUDED(TRACE_GET_TASK_NUMBER(handle));\r
-\r
-#define vTraceIncludeTaskInTrace(handle) \\r
-TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(TRACE_GET_TASK_NUMBER(handle));\r
-\r
-\r
-/* Queue */\r
-#define vTraceExcludeQueueFromTrace(handle) \\r
-TRACE_SET_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));\r
-\r
-#define vTraceIncludeQueueInTrace(handle) \\r
-TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));\r
-\r
-\r
-/* Semaphore */\r
-#define vTraceExcludeSemaphoreFromTrace(handle) \\r
-TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));\r
-\r
-#define vTraceIncludeSemaphoreInTrace(handle) \\r
-TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));\r
-\r
-\r
-/* Mutex */\r
-#define vTraceExcludeMutexFromTrace(handle) \\r
-TRACE_SET_MUTEX_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));\r
-\r
-#define vTraceIncludeMutexInTrace(handle) \\r
-TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));\r
-\r
-\r
-/* Kernel Services */\r
-#define vTraceExcludeKernelServiceDelayFromTrace() \\r
-TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY); \\r
-TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY_UNTIL);\r
-\r
-#define vTraceIncludeKernelServiceDelayInTrace() \\r
-TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY); \\r
-TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY_UNTIL);\r
-\r
-/* HELPER MACROS FOR KERNEL SERVICES FOR OBJECTS */\r
-#define vTraceExcludeKernelServiceSendFromTrace_HELPER(class) \\r
-TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_SUCCESS + class); \\r
-TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_BLOCK + class); \\r
-TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FAILED + class); \\r
-TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_SUCCESS + class); \\r
-TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_FAILED + class);\r
-\r
-#define vTraceIncludeKernelServiceSendInTrace_HELPER(class) \\r
-TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_SUCCESS + class); \\r
-TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_BLOCK + class); \\r
-TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FAILED + class); \\r
-TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_SUCCESS + class); \\r
-TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_FAILED + class);\r
-\r
-#define vTraceExcludeKernelServiceReceiveFromTrace_HELPER(class) \\r
-TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_SUCCESS + class); \\r
-TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_BLOCK + class); \\r
-TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FAILED + class); \\r
-TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + class); \\r
-TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + class);\r
-\r
-#define vTraceIncludeKernelServiceReceiveInTrace_HELPER(class) \\r
-TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_SUCCESS + class); \\r
-TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_BLOCK + class); \\r
-TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FAILED + class); \\r
-TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + class); \\r
-TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + class);\r
-\r
-/* EXCLUDE AND INCLUDE FOR QUEUE */\r
-#define vTraceExcludeKernelServiceQueueSendFromTrace() \\r
-vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_QUEUE);\r
-\r
-#define vTraceIncludeKernelServiceQueueSendInTrace() \\r
-vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_QUEUE);\r
-\r
-#define vTraceExcludeKernelServiceQueueReceiveFromTrace() \\r
-vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_QUEUE);\r
-\r
-#define vTraceIncludeKernelServiceQueueReceiveInTrace() \\r
-vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_QUEUE);\r
-\r
-/* EXCLUDE AND INCLUDE FOR SEMAPHORE */\r
-#define vTraceExcludeKernelServiceSemaphoreSendFromTrace() \\r
-vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_SEMAPHORE);\r
-\r
-#define vTraceIncludeKernelServicSemaphoreSendInTrace() \\r
-vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_SEMAPHORE);\r
-\r
-#define vTraceExcludeKernelServiceSemaphoreReceiveFromTrace() \\r
-vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_SEMAPHORE);\r
-\r
-#define vTraceIncludeKernelServiceSemaphoreReceiveInTrace() \\r
-vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_SEMAPHORE);\r
-\r
-/* EXCLUDE AND INCLUDE FOR MUTEX */\r
-#define vTraceExcludeKernelServiceMutexSendFromTrace() \\r
-vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_MUTEX);\r
-\r
-#define vTraceIncludeKernelServiceMutexSendInTrace() \\r
-vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_MUTEX);\r
-\r
-#define vTraceExcludeKernelServiceMutexReceiveFromTrace() \\r
-vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_MUTEX);\r
-\r
-#define vTraceIncludeKernelServiceMutexReceiveInTrace() \\r
-vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_MUTEX);\r
-\r
-/************************************************************************/\r
-/* KERNEL SPECIFIC MACROS TO NAME OBJECTS, IF NECESSARY */\r
-/************************************************************************/\r
-#define vTraceSetQueueName(object, name) \\r
-vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);\r
-\r
-#define vTraceSetSemaphoreName(object, name) \\r
-vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);\r
-\r
-#define vTraceSetMutexName(object, name) \\r
-vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);\r
-\r
-#endif\r
-\r
-#endif /* TRCKERNELPORT_H_ */
\ No newline at end of file
events that would be partially overwritten. If so, they must be "killed"\r
by replacing the user event and following data with NULL events (i.e.,\r
using a memset to zero).*/\r
- prvCheckDataToBeOverwrittenForMultiEntryEvents(noOfSlots);\r
+ prvCheckDataToBeOverwrittenForMultiEntryEvents((uint8_t)noOfSlots);\r
#endif\r
/* Copy the local buffer to the main buffer */\r
(void)memcpy(& RecorderDataPtr->eventData[RecorderDataPtr->nextFreeIndex * 4],\r
main USER_EVENT entry (Note: important that this is after the memcpy,\r
but within the critical section!)*/\r
RecorderDataPtr->eventData[RecorderDataPtr->nextFreeIndex * 4] =\r
- (uint8_t) USER_EVENT + noOfSlots - 1;\r
+ (uint8_t) ( USER_EVENT + noOfSlots - 1 );\r
\r
/* Update the main buffer event index (already checked that it fits in\r
the buffer, so no need to check for wrapping)*/\r