/*******************************************************************************
 * Tracealyzer v2.7.7 Recorder Library
 * Percepio AB, www.percepio.com
 *
 * trcKernelPort.h
 *
 * Kernel-specific functionality for FreeRTOS, used by the recorder library.
 *
 * Terms of Use
 * This software is copyright Percepio AB. The recorder library is free for
 * use together with Percepio products. You may distribute the recorder library
 * in its original form, including modifications in trcHardwarePort.c/.h,
 * given that these modifications are clearly marked as your own modifications
 * and documented in the initial comment section of these source files.
 * This software is the intellectual property of Percepio AB and may not be
 * sold or in other ways commercially redistributed without explicit written
 * permission by Percepio AB.
 *
 * Disclaimer
 * The trace tool and recorder library are being delivered to you AS IS and
 * Percepio AB makes no warranty as to its use or performance. Percepio AB does
 * not and cannot warrant the performance or results you may obtain by using the
 * software or documentation. Percepio AB makes no warranties, express or
 * implied, as to noninfringement of third party rights, merchantability, or
 * fitness for any particular purpose. In no event will Percepio AB, its
 * technology partners, or distributors be liable to you for any consequential,
 * incidental or special damages, including any lost profits or lost savings,
 * even if a representative of Percepio AB has been advised of the possibility
 * of such damages, or for any claim by any third party. Some jurisdictions do
 * not allow the exclusion or limitation of incidental, consequential or special
 * damages, or the exclusion of implied warranties or limitations on how long an
 * implied warranty may last, so the above limitations may not apply to you.
 *
 * Tabs are used for indentation in this file (1 tab = 4 spaces).
 *
 * Copyright Percepio AB, 2012-2015.
 * www.percepio.com
 ******************************************************************************/


#ifndef TRCKERNELPORTFREERTOS_H
#define TRCKERNELPORTFREERTOS_H

#include "FreeRTOS.h"	/* Defines configUSE_TRACE_FACILITY */
#include "trcHardwarePort.h"

extern int uiInEventGroupSetBitsFromISR;

#define USE_TRACEALYZER_RECORDER configUSE_TRACE_FACILITY

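/* The recorder is switched on and off together with the kernel's trace
 * facility, so enabling configUSE_TRACE_FACILITY also enables
 * USE_TRACEALYZER_RECORDER above. A minimal integration sketch, assuming the
 * v2.x user API declared in trcUser.h (vTraceInitTraceData / uiTraceStart);
 * check trcUser.h and trcConfig.h in your copy of the recorder for the exact
 * start-up sequence:
 *
 *    // FreeRTOSConfig.h
 *    #define configUSE_TRACE_FACILITY 1
 *
 *    // main.c
 *    #include "trcUser.h"
 *
 *    int main(void)
 *    {
 *        vTraceInitTraceData();     // set up the recorder data structures
 *        uiTraceStart();            // begin recording before starting the scheduler
 *        vTaskStartScheduler();
 *    }
 */
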
#if (USE_TRACEALYZER_RECORDER == 1)

/* Defines that must be set for the recorder to work properly */
#define TRACE_KERNEL_VERSION 0x1AA1
#define TRACE_TICK_RATE_HZ configTICK_RATE_HZ /* Defined in "FreeRTOS.h" */
#define TRACE_CPU_CLOCK_HZ configCPU_CLOCK_HZ /* Defined in "FreeRTOSConfig.h" */

#if (SELECTED_PORT == PORT_ARM_CortexM)

	/* If you get warnings regarding __get_PRIMASK and __set_PRIMASK, make sure that ARM's CMSIS API is
	   included by the recorder, e.g., via your chip vendor's header file ("board.h", "stm32f4xx.h", "lpc17xx.h"). */

	#define TRACE_SR_ALLOC_CRITICAL_SECTION() int __irq_status;
	#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = __get_PRIMASK(); __set_PRIMASK(1);}
	#define TRACE_EXIT_CRITICAL_SECTION() {__set_PRIMASK(__irq_status);}

#endif

#if ((SELECTED_PORT == PORT_ARM_CORTEX_A9) || (SELECTED_PORT == PORT_Renesas_RX600) || (SELECTED_PORT == PORT_MICROCHIP_PIC32MX) || (SELECTED_PORT == PORT_MICROCHIP_PIC32MZ))
	#define TRACE_SR_ALLOC_CRITICAL_SECTION() int __irq_status;
	#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = portSET_INTERRUPT_MASK_FROM_ISR();}
	#define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
#endif

#if (SELECTED_PORT == PORT_Win32)
	/* In the Win32 port there are no real interrupts, so the normal critical sections can be used */
	#define TRACE_SR_ALLOC_CRITICAL_SECTION()
	#define TRACE_ENTER_CRITICAL_SECTION() portENTER_CRITICAL()
	#define TRACE_EXIT_CRITICAL_SECTION() portEXIT_CRITICAL()
#endif

#ifndef TRACE_ENTER_CRITICAL_SECTION
	#error "This port has no valid definition for critical sections! See http://percepio.com/2014/10/27/how-to-define-critical-sections-for-the-recorder/"
#endif
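
/* The three macros above are meant to be used together inside recorder
 * functions: TRACE_SR_ALLOC_CRITICAL_SECTION() declares the variable that
 * holds the saved interrupt state, and the enter/exit macros save and restore
 * it. A minimal sketch of the intended pattern (prvExampleStoreEvent is a
 * hypothetical function, not part of the recorder):
 *
 *    static void prvExampleStoreEvent(void)
 *    {
 *        TRACE_SR_ALLOC_CRITICAL_SECTION();   // declares __irq_status on most ports
 *
 *        TRACE_ENTER_CRITICAL_SECTION();      // mask interrupts, remember the previous state
 *        // ... access the shared event buffer here ...
 *        TRACE_EXIT_CRITICAL_SECTION();       // restore the previous interrupt state
 *    }
 */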

#if (SELECTED_PORT == PORT_ARM_CortexM)
	#define trcCRITICAL_SECTION_BEGIN_ON_CORTEX_M_ONLY trcCRITICAL_SECTION_BEGIN
	#define trcCRITICAL_SECTION_END_ON_CORTEX_M_ONLY trcCRITICAL_SECTION_END
#else
	#define trcCRITICAL_SECTION_BEGIN_ON_CORTEX_M_ONLY() recorder_busy++;
	#define trcCRITICAL_SECTION_END_ON_CORTEX_M_ONLY() recorder_busy--;
#endif

/*************************************************************************/
/* KERNEL SPECIFIC OBJECT CONFIGURATION                                  */
/*************************************************************************/
#define TRACE_NCLASSES 7
#define TRACE_CLASS_QUEUE ((traceObjectClass)0)
#define TRACE_CLASS_SEMAPHORE ((traceObjectClass)1)
#define TRACE_CLASS_MUTEX ((traceObjectClass)2)
#define TRACE_CLASS_TASK ((traceObjectClass)3)
#define TRACE_CLASS_ISR ((traceObjectClass)4)
#define TRACE_CLASS_TIMER ((traceObjectClass)5)
#define TRACE_CLASS_EVENTGROUP ((traceObjectClass)6)

#define TRACE_KERNEL_OBJECT_COUNT (NQueue + NSemaphore + NMutex + NTask + NISR + NTimer + NEventGroup)

/* The size of the Object Property Table entries, in bytes, per object */

/* Queue properties (except name):		current number of messages in the queue */
#define PropertyTableSizeQueue			(NameLenQueue + 1)

/* Semaphore properties (except name):	state (signaled = 1, cleared = 0) */
#define PropertyTableSizeSemaphore		(NameLenSemaphore + 1)

/* Mutex properties (except name):		owner (task handle, 0 = free) */
#define PropertyTableSizeMutex			(NameLenMutex + 1)

/* Task properties (except name):		Byte 0: current priority
										Byte 1: state (if already active)
										Byte 2: legacy, not used
										Byte 3: legacy, not used */
#define PropertyTableSizeTask			(NameLenTask + 4)

/* ISR properties:						Byte 0: priority
										Byte 1: state (if already active) */
#define PropertyTableSizeISR			(NameLenISR + 2)

/* Timer properties:					Byte 0: state (unused for now) */
#define PropertyTableSizeTimer			(NameLenTimer + 1)

/* Event group properties:				Bytes 0-3: state (unused for now) */
#define PropertyTableSizeEventGroup		(NameLenEventGroup + 4)


/* The layout of the byte array representing the Object Property Table */
#define StartIndexQueue			0
#define StartIndexSemaphore		StartIndexQueue		+ NQueue		* PropertyTableSizeQueue
#define StartIndexMutex			StartIndexSemaphore	+ NSemaphore	* PropertyTableSizeSemaphore
#define StartIndexTask			StartIndexMutex		+ NMutex		* PropertyTableSizeMutex
#define StartIndexISR			StartIndexTask		+ NTask			* PropertyTableSizeTask
#define StartIndexTimer			StartIndexISR		+ NISR			* PropertyTableSizeISR
#define StartIndexEventGroup	StartIndexTimer		+ NTimer		* PropertyTableSizeTimer

/* Number of bytes used by the object table */
#define TRACE_OBJECT_TABLE_SIZE StartIndexEventGroup + NEventGroup * PropertyTableSizeEventGroup
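
/* A worked example of the table size above, using hypothetical values for the
 * N* and NameLen* settings from trcConfig.h (not the defaults, just an
 * illustration of the arithmetic):
 *
 *    NQueue = 10,      NameLenQueue = 15       -> 10 * (15 + 1) = 160 bytes
 *    NSemaphore = 10,  NameLenSemaphore = 15   -> 10 * (15 + 1) = 160 bytes
 *    NMutex = 10,      NameLenMutex = 15       -> 10 * (15 + 1) = 160 bytes
 *    NTask = 10,       NameLenTask = 15        -> 10 * (15 + 4) = 190 bytes
 *    NISR = 4,         NameLenISR = 15         ->  4 * (15 + 2) =  68 bytes
 *    NTimer = 4,       NameLenTimer = 15       ->  4 * (15 + 1) =  64 bytes
 *    NEventGroup = 4,  NameLenEventGroup = 15  ->  4 * (15 + 4) =  76 bytes
 *
 *    TRACE_OBJECT_TABLE_SIZE = 160 + 160 + 160 + 190 + 68 + 64 + 76 = 878 bytes
 */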

#define FREERTOS_VERSION_NOT_SET			0
#define FREERTOS_VERSION_7_3_OR_7_4			1
#define FREERTOS_VERSION_7_5_OR_7_6			2
#define FREERTOS_VERSION_8_0_OR_LATER		3

/* Includes */
#include "trcConfig.h" /* Must be first, even before trcTypes.h */
#include "trcHardwarePort.h"
#include "trcTypes.h"
#include "trcKernelHooks.h"
#include "trcBase.h"
#include "trcKernel.h"
#include "trcUser.h"

#if (INCLUDE_NEW_TIME_EVENTS == 1 && configUSE_TICKLESS_IDLE != 0)
#error "NewTime events cannot be used in combination with tickless idle!"
#endif

/* Initialization of the object property table */
void vTraceInitObjectPropertyTable(void);

/* Initialization of the handle mechanism, see e.g., xTraceGetObjectHandle */
void vTraceInitObjectHandleStack(void);

/* Returns the "Not enough handles" error message for the specified object class */
const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);

/*******************************************************************************
 * The event codes - should match the offline config file.
 *
 * Some sections below are encoded to allow for constructions like:
 *
 * vTraceStoreKernelCall(EVENTGROUP_CREATE + objectclass, ...
 *
 * In such cases, the object class ID is given by the three LSB bits. Since each
 * object class has a separate object property table, the class ID is needed to
 * know which section of the object table to use when getting an object name
 * from an object handle.
 ******************************************************************************/

#define NULL_EVENT					(0x00)	/* Ignored in the analysis */

/*******************************************************************************
 * EVENTGROUP_DIV
 *
 * Miscellaneous events.
 ******************************************************************************/
#define EVENTGROUP_DIV				(NULL_EVENT + 1)			/*0x01*/
#define DIV_XPS						(EVENTGROUP_DIV + 0)		/*0x01*/
#define DIV_TASK_READY				(EVENTGROUP_DIV + 1)		/*0x02*/
#define DIV_NEW_TIME				(EVENTGROUP_DIV + 2)		/*0x03*/

/*******************************************************************************
 * EVENTGROUP_TS
 *
 * Events for storing task-switches and interrupts. The RESUME events are
 * generated if the task/interrupt is already marked active.
 ******************************************************************************/
#define EVENTGROUP_TS				(EVENTGROUP_DIV + 3)		/*0x04*/
#define TS_ISR_BEGIN				(EVENTGROUP_TS + 0)			/*0x04*/
#define TS_ISR_RESUME				(EVENTGROUP_TS + 1)			/*0x05*/
#define TS_TASK_BEGIN				(EVENTGROUP_TS + 2)			/*0x06*/
#define TS_TASK_RESUME				(EVENTGROUP_TS + 3)			/*0x07*/

/*******************************************************************************
 * EVENTGROUP_OBJCLOSE_NAME
 *
 * About Close Events
 * When an object is evicted from the object property table (object close), two
 * internal events are stored (EVENTGROUP_OBJCLOSE_NAME and
 * EVENTGROUP_OBJCLOSE_PROP), containing the handle-name mapping and object
 * properties valid up to this point.
 ******************************************************************************/
#define EVENTGROUP_OBJCLOSE_NAME	(EVENTGROUP_TS + 4)			/*0x08*/

/*******************************************************************************
 * EVENTGROUP_OBJCLOSE_PROP
 *
 * The internal event carrying properties of deleted objects.
 * The handle and object class of the closed object are not stored in this
 * event, but are assumed to be the same as in the preceding CLOSE event. Thus,
 * these two events must be generated from within a critical section.
 * When queues are closed, arg1 is the "state" property (i.e., the number of
 * buffered messages/signals).
 * When actors are closed, arg1 is the priority, arg2 is the handle of the
 * "instance finish" event, and arg3 is the event code of the "instance finish"
 * event. In this case, the lower three bits give the object class of the
 * instance finish handle. The lower three bits are not used (always zero) when
 * queues are closed, since the queue type is given in the previous
 * OBJCLOSE_NAME event.
 ******************************************************************************/
#define EVENTGROUP_OBJCLOSE_PROP	(EVENTGROUP_OBJCLOSE_NAME + 8)	/*0x10*/

/*******************************************************************************
 * EVENTGROUP_CREATE
 *
 * The events in this group are used to log kernel object creations.
 * The lower three bits in the event code give the object class, i.e., the type
 * of create operation (task, queue, semaphore, etc).
 ******************************************************************************/
#define EVENTGROUP_CREATE_OBJ_SUCCESS	(EVENTGROUP_OBJCLOSE_PROP + 8)	/*0x18*/
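
/* A worked example of the class encoding described above, using the values
 * defined in this file: creating a binary semaphore is logged with the event
 * code EVENTGROUP_CREATE_OBJ_SUCCESS + TRACE_CLASS_SEMAPHORE, i.e.
 *
 *    0x18 + 1 = 0x19
 *
 * Since the group base is a multiple of 8, the class can be recovered from the
 * three LSBs (0x19 & 0x07 = 1 = TRACE_CLASS_SEMAPHORE), which selects the
 * semaphore section of the object property table when looking up the name.
 */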

/*******************************************************************************
 * EVENTGROUP_SEND
 *
 * The events in this group are used to log Send/Give events on queues,
 * semaphores and mutexes. The lower three bits in the event code give the
 * object class, i.e., what type of object is operated on (queue, semaphore
 * or mutex).
 ******************************************************************************/
#define EVENTGROUP_SEND_SUCCESS	(EVENTGROUP_CREATE_OBJ_SUCCESS + 8)		/*0x20*/

/*******************************************************************************
 * EVENTGROUP_RECEIVE
 *
 * The events in this group are used to log Receive/Take events on queues,
 * semaphores and mutexes. The lower three bits in the event code give the
 * object class, i.e., what type of object is operated on (queue, semaphore
 * or mutex).
 ******************************************************************************/
#define EVENTGROUP_RECEIVE_SUCCESS	(EVENTGROUP_SEND_SUCCESS + 8)		/*0x28*/

/* Send/Give operations, from ISR */
#define EVENTGROUP_SEND_FROM_ISR_SUCCESS \
							(EVENTGROUP_RECEIVE_SUCCESS + 8)			/*0x30*/

/* Receive/Take operations, from ISR */
#define EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS \
							(EVENTGROUP_SEND_FROM_ISR_SUCCESS + 8)		/*0x38*/

/* "Failed" event type versions of the above (timeout, failed allocation, etc.) */
#define EVENTGROUP_KSE_FAILED \
							(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + 8)	/*0x40*/

/* Failed create calls - memory allocation failed */
#define EVENTGROUP_CREATE_OBJ_FAILED	(EVENTGROUP_KSE_FAILED)			/*0x40*/

/* Failed send/give - timeout! */
#define EVENTGROUP_SEND_FAILED		(EVENTGROUP_CREATE_OBJ_FAILED + 8)	/*0x48*/

/* Failed receive/take - timeout! */
#define EVENTGROUP_RECEIVE_FAILED	(EVENTGROUP_SEND_FAILED + 8)		/*0x50*/

/* Failed non-blocking send/give - queue full */
#define EVENTGROUP_SEND_FROM_ISR_FAILED	(EVENTGROUP_RECEIVE_FAILED + 8)	/*0x58*/

/* Failed non-blocking receive/take - queue empty */
#define EVENTGROUP_RECEIVE_FROM_ISR_FAILED \
							(EVENTGROUP_SEND_FROM_ISR_FAILED + 8)		/*0x60*/

/* Events when blocking on receive/take */
#define EVENTGROUP_RECEIVE_BLOCK \
							(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + 8)	/*0x68*/

/* Events when blocking on send/give */
#define EVENTGROUP_SEND_BLOCK	(EVENTGROUP_RECEIVE_BLOCK + 8)			/*0x70*/

/* Events on queue peek (receive) */
#define EVENTGROUP_PEEK_SUCCESS	(EVENTGROUP_SEND_BLOCK + 8)				/*0x78*/

/* Events on object delete (vTaskDelete or vQueueDelete) */
#define EVENTGROUP_DELETE_OBJ_SUCCESS	(EVENTGROUP_PEEK_SUCCESS + 8)	/*0x80*/

/* Other events - object class is implied: TASK */
#define EVENTGROUP_OTHERS	(EVENTGROUP_DELETE_OBJ_SUCCESS + 8)			/*0x88*/
#define TASK_DELAY_UNTIL	(EVENTGROUP_OTHERS + 0)						/*0x88*/
#define TASK_DELAY			(EVENTGROUP_OTHERS + 1)						/*0x89*/
#define TASK_SUSPEND		(EVENTGROUP_OTHERS + 2)						/*0x8A*/
#define TASK_RESUME			(EVENTGROUP_OTHERS + 3)						/*0x8B*/
#define TASK_RESUME_FROM_ISR	(EVENTGROUP_OTHERS + 4)					/*0x8C*/
#define TASK_PRIORITY_SET		(EVENTGROUP_OTHERS + 5)					/*0x8D*/
#define TASK_PRIORITY_INHERIT	(EVENTGROUP_OTHERS + 6)					/*0x8E*/
#define TASK_PRIORITY_DISINHERIT	(EVENTGROUP_OTHERS + 7)				/*0x8F*/

#define EVENTGROUP_MISC_PLACEHOLDER	(EVENTGROUP_OTHERS + 8)				/*0x90*/
#define PEND_FUNC_CALL			(EVENTGROUP_MISC_PLACEHOLDER+0)			/*0x90*/
#define PEND_FUNC_CALL_FROM_ISR	(EVENTGROUP_MISC_PLACEHOLDER+1)			/*0x91*/
#define PEND_FUNC_CALL_FAILED	(EVENTGROUP_MISC_PLACEHOLDER+2)			/*0x92*/
#define PEND_FUNC_CALL_FROM_ISR_FAILED	(EVENTGROUP_MISC_PLACEHOLDER+3)	/*0x93*/
#define MEM_MALLOC_SIZE			(EVENTGROUP_MISC_PLACEHOLDER+4)			/*0x94*/
#define MEM_MALLOC_ADDR			(EVENTGROUP_MISC_PLACEHOLDER+5)			/*0x95*/
#define MEM_FREE_SIZE			(EVENTGROUP_MISC_PLACEHOLDER+6)			/*0x96*/
#define MEM_FREE_ADDR			(EVENTGROUP_MISC_PLACEHOLDER+7)			/*0x97*/

/* User events */
#define EVENTGROUP_USEREVENT (EVENTGROUP_MISC_PLACEHOLDER + 8)			/*0x98*/
#define USER_EVENT (EVENTGROUP_USEREVENT + 0)

/* Allow for 0-15 arguments (the number of args is added to the event code) */
#define USER_EVENT_LAST (EVENTGROUP_USEREVENT + 15)						/*0xA7*/
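
/* A minimal sketch of how user events end up in this range, assuming the v2.x
 * user-event API declared in trcUser.h (xTraceOpenLabel/vTracePrintF); the
 * channel name, format string and adcValue variable are made up for the
 * example. As noted above, the argument count is added to USER_EVENT:
 *
 *    traceLabel xAdcChannel = xTraceOpenLabel("ADC");
 *    vTracePrintF(xAdcChannel, "value: %d", adcValue);  // one argument -> USER_EVENT + 1
 */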

/*******************************************************************************
 * XTS Event - eXtended TimeStamp events
 * The timestamps used in the recorder are "differential timestamps" (DTS), i.e.
 * the time since the last stored event. The DTS fields are either 1 or 2 bytes
 * in the other events, depending on the bytes available in the event struct.
 * If the time since the last event (the DTS) is larger than allowed for by
 * the DTS field of the current event, an XTS event is inserted immediately
 * before the original event. The XTS event contains up to 3 additional bytes
 * of the DTS value - the higher bytes of the true DTS value. The lower 1-2
 * bytes are stored in the normal DTS field.
 * There are two types of XTS events, XTS8 and XTS16. An XTS8 event is stored
 * when there is only room for 1 byte (8 bit) DTS data in the original event,
 * which means a limit of 0xFF (255). The XTS16 is used when the original event
 * has a 16 bit DTS field and thereby can handle values up to 0xFFFF (65535).
 *
 * Using a very high frequency time base can result in many XTS events.
 * Preferably, the time between two OS ticks should fit in 16 bits, i.e.,
 * at most 65535. If your time base has a higher frequency, you can define
 * the TRACE
 ******************************************************************************/
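
/* A worked example of the XTS mechanism described above: suppose 300000 time
 * base ticks have passed since the previous event and the next event only has
 * an 8-bit DTS field (max 255). The recorder then first stores an XTS8 event
 * carrying the high-order part and puts the low byte in the original event:
 *
 *    300000 = 0x493E0
 *    high part (stored in the XTS8 event)        = 300000 >> 8   = 1171 (0x493)
 *    low byte (stored in the original DTS field) = 300000 & 0xFF = 224  (0xE0)
 *    reconstructed DTS = 1171 * 256 + 224 = 300000
 */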

#define EVENTGROUP_SYS (EVENTGROUP_USEREVENT + 16)						/*0xA8*/
#define XTS8 (EVENTGROUP_SYS + 0)										/*0xA8*/
#define XTS16 (EVENTGROUP_SYS + 1)										/*0xA9*/
#define EVENT_BEING_WRITTEN (EVENTGROUP_SYS + 2)						/*0xAA*/
#define RESERVED_DUMMY_CODE (EVENTGROUP_SYS + 3)						/*0xAB*/
#define LOW_POWER_BEGIN (EVENTGROUP_SYS + 4)							/*0xAC*/
#define LOW_POWER_END (EVENTGROUP_SYS + 5)								/*0xAD*/
#define XID (EVENTGROUP_SYS + 6)										/*0xAE*/
#define XTS16L (EVENTGROUP_SYS + 7)										/*0xAF*/

#define EVENTGROUP_TIMER (EVENTGROUP_SYS + 8)							/*0xB0*/
#define TIMER_CREATE (EVENTGROUP_TIMER + 0)								/*0xB0*/
#define TIMER_START (EVENTGROUP_TIMER + 1)								/*0xB1*/
#define TIMER_RST (EVENTGROUP_TIMER + 2)								/*0xB2*/
#define TIMER_STOP (EVENTGROUP_TIMER + 3)								/*0xB3*/
#define TIMER_CHANGE_PERIOD (EVENTGROUP_TIMER + 4)						/*0xB4*/
#define TIMER_DELETE (EVENTGROUP_TIMER + 5)								/*0xB5*/
#define TIMER_START_FROM_ISR (EVENTGROUP_TIMER + 6)						/*0xB6*/
#define TIMER_RESET_FROM_ISR (EVENTGROUP_TIMER + 7)						/*0xB7*/
#define TIMER_STOP_FROM_ISR (EVENTGROUP_TIMER + 8)						/*0xB8*/

#define TIMER_CREATE_FAILED (EVENTGROUP_TIMER + 9)						/*0xB9*/
#define TIMER_START_FAILED (EVENTGROUP_TIMER + 10)						/*0xBA*/
#define TIMER_RESET_FAILED (EVENTGROUP_TIMER + 11)						/*0xBB*/
#define TIMER_STOP_FAILED (EVENTGROUP_TIMER + 12)						/*0xBC*/
#define TIMER_CHANGE_PERIOD_FAILED (EVENTGROUP_TIMER + 13)				/*0xBD*/
#define TIMER_DELETE_FAILED (EVENTGROUP_TIMER + 14)						/*0xBE*/
#define TIMER_START_FROM_ISR_FAILED (EVENTGROUP_TIMER + 15)				/*0xBF*/
#define TIMER_RESET_FROM_ISR_FAILED (EVENTGROUP_TIMER + 16)				/*0xC0*/
#define TIMER_STOP_FROM_ISR_FAILED (EVENTGROUP_TIMER + 17)				/*0xC1*/

#define EVENTGROUP_EG (EVENTGROUP_TIMER + 18)							/*0xC2*/
#define EVENT_GROUP_CREATE (EVENTGROUP_EG + 0)							/*0xC2*/
#define EVENT_GROUP_CREATE_FAILED (EVENTGROUP_EG + 1)					/*0xC3*/
#define EVENT_GROUP_SYNC_BLOCK (EVENTGROUP_EG + 2)						/*0xC4*/
#define EVENT_GROUP_SYNC_END (EVENTGROUP_EG + 3)						/*0xC5*/
#define EVENT_GROUP_WAIT_BITS_BLOCK (EVENTGROUP_EG + 4)					/*0xC6*/
#define EVENT_GROUP_WAIT_BITS_END (EVENTGROUP_EG + 5)					/*0xC7*/
#define EVENT_GROUP_CLEAR_BITS (EVENTGROUP_EG + 6)						/*0xC8*/
#define EVENT_GROUP_CLEAR_BITS_FROM_ISR (EVENTGROUP_EG + 7)				/*0xC9*/
#define EVENT_GROUP_SET_BITS (EVENTGROUP_EG + 8)						/*0xCA*/
#define EVENT_GROUP_DELETE (EVENTGROUP_EG + 9)							/*0xCB*/
#define EVENT_GROUP_SYNC_END_FAILED (EVENTGROUP_EG + 10)				/*0xCC*/
#define EVENT_GROUP_WAIT_BITS_END_FAILED (EVENTGROUP_EG + 11)			/*0xCD*/
#define EVENT_GROUP_SET_BITS_FROM_ISR (EVENTGROUP_EG + 12)				/*0xCE*/
#define EVENT_GROUP_SET_BITS_FROM_ISR_FAILED (EVENTGROUP_EG + 13)		/*0xCF*/

#define TASK_INSTANCE_FINISHED_NEXT_KSE (EVENTGROUP_EG + 14)			/*0xD0*/
#define TASK_INSTANCE_FINISHED_DIRECT (EVENTGROUP_EG + 15)				/*0xD1*/

#define TRACE_TASK_NOTIFY_GROUP (EVENTGROUP_EG + 16)					/*0xD2*/
#define TRACE_TASK_NOTIFY (TRACE_TASK_NOTIFY_GROUP + 0)					/*0xD2*/
#define TRACE_TASK_NOTIFY_TAKE (TRACE_TASK_NOTIFY_GROUP + 1)			/*0xD3*/
#define TRACE_TASK_NOTIFY_TAKE_BLOCK (TRACE_TASK_NOTIFY_GROUP + 2)		/*0xD4*/
#define TRACE_TASK_NOTIFY_TAKE_FAILED (TRACE_TASK_NOTIFY_GROUP + 3)		/*0xD5*/
#define TRACE_TASK_NOTIFY_WAIT (TRACE_TASK_NOTIFY_GROUP + 4)			/*0xD6*/
#define TRACE_TASK_NOTIFY_WAIT_BLOCK (TRACE_TASK_NOTIFY_GROUP + 5)		/*0xD7*/
#define TRACE_TASK_NOTIFY_WAIT_FAILED (TRACE_TASK_NOTIFY_GROUP + 6)		/*0xD8*/
#define TRACE_TASK_NOTIFY_FROM_ISR (TRACE_TASK_NOTIFY_GROUP + 7)		/*0xD9*/
#define TRACE_TASK_NOTIFY_GIVE_FROM_ISR (TRACE_TASK_NOTIFY_GROUP + 8)	/*0xDA*/

/************************************************************************/
/* KERNEL SPECIFIC DATA AND FUNCTIONS NEEDED TO PROVIDE THE             */
/* FUNCTIONALITY REQUESTED BY THE TRACE RECORDER                        */
/************************************************************************/

/******************************************************************************
 * TraceObjectClassTable
 * Translates a FreeRTOS QueueType into trace object classes (TRACE_CLASS_).
 * This was added since we want to map both types of Mutex and both types of
 * Semaphore onto common classes for all Mutexes and all Semaphores, respectively.
 *
 * FreeRTOS Queue types
 * #define queueQUEUE_TYPE_BASE					(0U) => TRACE_CLASS_QUEUE
 * #define queueQUEUE_TYPE_MUTEX				(1U) => TRACE_CLASS_MUTEX
 * #define queueQUEUE_TYPE_COUNTING_SEMAPHORE	(2U) => TRACE_CLASS_SEMAPHORE
 * #define queueQUEUE_TYPE_BINARY_SEMAPHORE		(3U) => TRACE_CLASS_SEMAPHORE
 * #define queueQUEUE_TYPE_RECURSIVE_MUTEX		(4U) => TRACE_CLASS_MUTEX
 ******************************************************************************/

extern traceObjectClass TraceObjectClassTable[5];
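
/* Based on the mapping described above, the table defined in the .c file is
 * expected to look like the following sketch (see trcKernelPort.c for the
 * actual definition):
 *
 *    traceObjectClass TraceObjectClassTable[5] = {
 *        TRACE_CLASS_QUEUE,        // queueQUEUE_TYPE_BASE (0)
 *        TRACE_CLASS_MUTEX,        // queueQUEUE_TYPE_MUTEX (1)
 *        TRACE_CLASS_SEMAPHORE,    // queueQUEUE_TYPE_COUNTING_SEMAPHORE (2)
 *        TRACE_CLASS_SEMAPHORE,    // queueQUEUE_TYPE_BINARY_SEMAPHORE (3)
 *        TRACE_CLASS_MUTEX         // queueQUEUE_TYPE_RECURSIVE_MUTEX (4)
 *    };
 */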

/* These functions are implemented in the .c file since certain header files
   must not be included in this one */
objectHandleType prvTraceGetObjectNumber(void* handle);
unsigned char prvTraceGetObjectType(void* handle);
objectHandleType prvTraceGetTaskNumber(void* handle);
unsigned char prvTraceIsSchedulerActive(void);
unsigned char prvTraceIsSchedulerSuspended(void);
unsigned char prvTraceIsSchedulerStarted(void);
void* prvTraceGetCurrentTaskHandle(void);

#if (configUSE_TIMERS == 1)
#undef INCLUDE_xTimerGetTimerDaemonTaskHandle
#define INCLUDE_xTimerGetTimerDaemonTaskHandle 1
#endif

/************************************************************************/
/* KERNEL SPECIFIC MACROS USED BY THE TRACE RECORDER                    */
/************************************************************************/

#define TRACE_MALLOC(size) pvPortMalloc(size)
#define TRACE_IS_SCHEDULER_ACTIVE() prvTraceIsSchedulerActive()
#define TRACE_IS_SCHEDULER_STARTED() prvTraceIsSchedulerStarted()
#define TRACE_IS_SCHEDULER_SUSPENDED() prvTraceIsSchedulerSuspended()
#define TRACE_GET_CURRENT_TASK() prvTraceGetCurrentTaskHandle()

#define TRACE_GET_TASK_PRIORITY(pxTCB) ((uint8_t)pxTCB->uxPriority)
#define TRACE_GET_TASK_NAME(pxTCB) ((char*)pxTCB->pcTaskName)
#define TRACE_GET_TASK_NUMBER(pxTCB) (prvTraceGetTaskNumber(pxTCB))
#define TRACE_SET_TASK_NUMBER(pxTCB) pxTCB->uxTaskNumber = xTraceGetObjectHandle(TRACE_CLASS_TASK);

#define TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass) TraceObjectClassTable[kernelClass]
#define TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject) TRACE_GET_CLASS_TRACE_CLASS(CLASS, prvTraceGetObjectType(pxObject))

/* Note: Timer tracing only supported on FreeRTOS v8 or later, so "Timer_t" is safe to use! */
#define TRACE_GET_TIMER_NUMBER(tmr) ( ( objectHandleType ) ((Timer_t*)tmr)->uxTimerNumber )
#define TRACE_SET_TIMER_NUMBER(tmr) ((Timer_t*)tmr)->uxTimerNumber = xTraceGetObjectHandle(TRACE_CLASS_TIMER);
#define TRACE_GET_TIMER_NAME(pxTimer) pxTimer->pcTimerName
#define TRACE_GET_TIMER_PERIOD(pxTimer) pxTimer->xTimerPeriodInTicks

#define TRACE_GET_EVENTGROUP_NUMBER(eg) ( ( objectHandleType ) uxEventGroupGetNumber(eg) )
#define TRACE_SET_EVENTGROUP_NUMBER(eg) ((EventGroup_t*)eg)->uxEventGroupNumber = xTraceGetObjectHandle(TRACE_CLASS_EVENTGROUP);

#define TRACE_GET_OBJECT_NUMBER(CLASS, pxObject) (prvTraceGetObjectNumber(pxObject))

#if (FREERTOS_VERSION < FREERTOS_VERSION_8_0_OR_LATER)
	#define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) pxObject->ucQueueNumber = xTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject));
#else
	#define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) pxObject->uxQueueNumber = xTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject));
#endif

#define TRACE_GET_CLASS_EVENT_CODE(SERVICE, RESULT, CLASS, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass))
#define TRACE_GET_OBJECT_EVENT_CODE(SERVICE, RESULT, CLASS, pxObject) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject))
#define TRACE_GET_TASK_EVENT_CODE(SERVICE, RESULT, CLASS, pxTCB) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_CLASS_TASK)
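
/* An example of how the event-code macros above compose: for a successful
 * send/give on a counting semaphore,
 * TRACE_GET_OBJECT_EVENT_CODE(SEND, SUCCESS, UNUSED, pxQueue) expands to
 *
 *    EVENTGROUP_SEND_SUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue)
 *    = 0x20 + TraceObjectClassTable[queueQUEUE_TYPE_COUNTING_SEMAPHORE]
 *    = 0x20 + TRACE_CLASS_SEMAPHORE
 *    = 0x20 + 1 = 0x21
 *
 * i.e., the "send/give succeeded on a semaphore" event code.
 */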

/************************************************************************/
/* KERNEL SPECIFIC WRAPPERS THAT SHOULD BE CALLED BY THE KERNEL         */
/************************************************************************/

#if (configUSE_TICKLESS_IDLE != 0)

#undef traceLOW_POWER_IDLE_BEGIN
#define traceLOW_POWER_IDLE_BEGIN() \
	{ \
		extern uint32_t trace_disable_timestamp; \
		vTraceStoreLowPower(0); \
		trace_disable_timestamp = 1; \
	}

#undef traceLOW_POWER_IDLE_END
#define traceLOW_POWER_IDLE_END() \
	{ \
		extern uint32_t trace_disable_timestamp; \
		trace_disable_timestamp = 0; \
		vTraceStoreLowPower(1); \
	}

#endif

/* A macro that updates the tick count when returning from tickless idle */
#undef traceINCREASE_TICK_COUNT
/* Note: This can handle time adjustments of at most 2^32 clock cycles, i.e., about 35 seconds at 120 MHz. Tickless idle periods longer than that will appear "compressed" on the timeline. */
#define traceINCREASE_TICK_COUNT( xCount ) { DWT_CYCLES_ADDED += (xCount * (TRACE_CPU_CLOCK_HZ / TRACE_TICK_RATE_HZ)); }
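
/* A worked example of the adjustment above, with hypothetical clock settings:
 * at TRACE_CPU_CLOCK_HZ = 120 MHz and TRACE_TICK_RATE_HZ = 1000 Hz, each
 * skipped tick adds 120000000 / 1000 = 120000 cycles to DWT_CYCLES_ADDED, so a
 * tickless period of 500 ticks adds 500 * 120000 = 60000000 cycles (0.5 s).
 * The 32-bit arithmetic therefore covers roughly 2^32 / 120000000 ~ 35.8 s of
 * accumulated idle time at this clock rate, matching the note above.
 */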

/* Called for each task that becomes ready */
#undef traceMOVED_TASK_TO_READY_STATE
#define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \
	trcKERNEL_HOOKS_MOVED_TASK_TO_READY_STATE(pxTCB);

/* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. */
#undef traceTASK_INCREMENT_TICK

#if (FREERTOS_VERSION == FREERTOS_VERSION_7_3_OR_7_4)

#define traceTASK_INCREMENT_TICK( xTickCount ) \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxMissedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); }

#else

#define traceTASK_INCREMENT_TICK( xTickCount ) \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxPendedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); }

#endif

/* Called on each task switch */
#undef traceTASK_SWITCHED_IN
#define traceTASK_SWITCHED_IN() \
	trcKERNEL_HOOKS_TASK_SWITCH(TRACE_GET_CURRENT_TASK());

/* Called on vTaskSuspend */
#undef traceTASK_SUSPEND
#define traceTASK_SUSPEND( pxTaskToSuspend ) \
	trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend);

/* Called on vTaskDelay - note the use of the FreeRTOS variable xTicksToDelay */
#undef traceTASK_DELAY
#define traceTASK_DELAY() \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY, pxCurrentTCB, xTicksToDelay); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

/* Called on vTaskDelayUntil - note the use of the FreeRTOS variable xTimeToWake */
#undef traceTASK_DELAY_UNTIL
#define traceTASK_DELAY_UNTIL() \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY_UNTIL, pxCurrentTCB, xTimeToWake); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

#if (INCLUDE_OBJECT_DELETE == 1)
/* Called on vTaskDelete */
#undef traceTASK_DELETE
#define traceTASK_DELETE( pxTaskToDelete ) \
	{ TRACE_SR_ALLOC_CRITICAL_SECTION(); \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_TASK_DELETE(DELETE_OBJ, pxTaskToDelete); \
	TRACE_EXIT_CRITICAL_SECTION(); }
#endif

#if (INCLUDE_OBJECT_DELETE == 1)
/* Called on vQueueDelete */
#undef traceQUEUE_DELETE
#define traceQUEUE_DELETE( pxQueue ) \
	{ TRACE_SR_ALLOC_CRITICAL_SECTION(); \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_OBJECT_DELETE(DELETE_OBJ, UNUSED, pxQueue); \
	TRACE_EXIT_CRITICAL_SECTION(); }
#endif

/* Called on xTaskCreate */
#undef traceTASK_CREATE
#define traceTASK_CREATE(pxNewTCB) \
	if (pxNewTCB != NULL) \
	{ \
		trcKERNEL_HOOKS_TASK_CREATE(CREATE_OBJ, UNUSED, pxNewTCB); \
	}

/* Called in xTaskCreate, if it fails (typically if the stack cannot be allocated) */
#undef traceTASK_CREATE_FAILED
#define traceTASK_CREATE_FAILED() \
	trcKERNEL_HOOKS_TASK_CREATE_FAILED(CREATE_OBJ, UNUSED);

/* Called in xQueueCreate, and thereby for all other objects based on queues, such as semaphores. */
#undef traceQUEUE_CREATE
#define traceQUEUE_CREATE( pxNewQueue ) \
	trcKERNEL_HOOKS_OBJECT_CREATE(CREATE_OBJ, UNUSED, pxNewQueue);

/* Called in xQueueCreate, if the queue creation fails */
#undef traceQUEUE_CREATE_FAILED
#define traceQUEUE_CREATE_FAILED( queueType ) \
	trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE_OBJ, UNUSED, queueType);

/* Called in xQueueCreateMutex, and thereby also from xSemaphoreCreateMutex and xSemaphoreCreateRecursiveMutex */
#undef traceCREATE_MUTEX
#define traceCREATE_MUTEX( pxNewQueue ) \
	trcKERNEL_HOOKS_OBJECT_CREATE(CREATE_OBJ, UNUSED, pxNewQueue);

/* Called in xQueueCreateMutex when the operation fails (when memory allocation fails) */
#undef traceCREATE_MUTEX_FAILED
#define traceCREATE_MUTEX_FAILED() \
	trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE_OBJ, UNUSED, queueQUEUE_TYPE_MUTEX);

/* Called when the mutex cannot be given, since the caller is not the holder */
#undef traceGIVE_MUTEX_RECURSIVE_FAILED
#define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxMutex);

/* Called when a message is sent to a queue */	/* CS IS NEW ! */
#undef traceQUEUE_SEND
#define traceQUEUE_SEND( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, SUCCESS, UNUSED, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? (uint8_t)0 : (uint8_t)(pxQueue->uxMessagesWaiting + 1));

/* Called when a message failed to be sent to a queue (timeout) */
#undef traceQUEUE_SEND_FAILED
#define traceQUEUE_SEND_FAILED( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxQueue);

/* Called when the task is blocked due to a send operation on a full queue */
#undef traceBLOCKING_ON_QUEUE_SEND
#define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, BLOCK, UNUSED, pxQueue);

/* Called when a message is received from a queue */
#undef traceQUEUE_RECEIVE
#define traceQUEUE_RECEIVE( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, SUCCESS, UNUSED, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? TRACE_GET_TASK_NUMBER(TRACE_GET_CURRENT_TASK()) : (uint8_t)(pxQueue->uxMessagesWaiting - 1));

/* Called when a receive operation on a queue fails (timeout) */
#undef traceQUEUE_RECEIVE_FAILED
#define traceQUEUE_RECEIVE_FAILED( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, FAILED, UNUSED, pxQueue);

/* Called when the task is blocked due to a receive operation on an empty queue */
#undef traceBLOCKING_ON_QUEUE_RECEIVE
#define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, BLOCK, UNUSED, pxQueue); \
	if (TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) != TRACE_CLASS_MUTEX) \
	{trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();}

/* Called on xQueuePeek */
#undef traceQUEUE_PEEK
#define traceQUEUE_PEEK( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(PEEK, SUCCESS, UNUSED, pxQueue);

/* Called when a message is sent from interrupt context, e.g., using xQueueSendFromISR */
#undef traceQUEUE_SEND_FROM_ISR
#define traceQUEUE_SEND_FROM_ISR( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, SUCCESS, UNUSED, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting + 1));

/* Called when a message send from interrupt context fails (since the queue was full) */
#undef traceQUEUE_SEND_FROM_ISR_FAILED
#define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, FAILED, UNUSED, pxQueue);

/* Called when a message is received in interrupt context, e.g., using xQueueReceiveFromISR */
#undef traceQUEUE_RECEIVE_FROM_ISR
#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, SUCCESS, UNUSED, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting - 1));

/* Called when a message receive from interrupt context fails (since the queue was empty) */
#undef traceQUEUE_RECEIVE_FROM_ISR_FAILED
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, FAILED, UNUSED, pxQueue);

/* Called in vTaskPrioritySet */
#undef traceTASK_PRIORITY_SET
#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \
	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_SET, pxTask, uxNewPriority);

/* Called in vTaskPriorityInherit, which is called by mutex operations */
#undef traceTASK_PRIORITY_INHERIT
#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \
	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_INHERIT, pxTask, uxNewPriority);

/* Called in vTaskPriorityDisinherit, which is called by mutex operations */
#undef traceTASK_PRIORITY_DISINHERIT
#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \
	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_DISINHERIT, pxTask, uxNewPriority);

/* Called in vTaskResume */
#undef traceTASK_RESUME
#define traceTASK_RESUME( pxTaskToResume ) \
	trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME, pxTaskToResume);

/* Called in xTaskResumeFromISR */
#undef traceTASK_RESUME_FROM_ISR
#define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \
	trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME_FROM_ISR, pxTaskToResume);


#if (FREERTOS_VERSION >= FREERTOS_VERSION_8_0_OR_LATER)

#if (INCLUDE_MEMMANG_EVENTS == 1)

extern void vTraceStoreMemMangEvent(uint32_t ecode, uint32_t address, int32_t size);

#undef traceMALLOC
#define traceMALLOC( pvAddress, uiSize ) {if (pvAddress != 0) vTraceStoreMemMangEvent(MEM_MALLOC_SIZE, ( uint32_t ) pvAddress, (int32_t)uiSize); }

#undef traceFREE
#define traceFREE( pvAddress, uiSize ) {vTraceStoreMemMangEvent(MEM_FREE_SIZE, ( uint32_t ) pvAddress, (int32_t)(-uiSize)); }

#endif

/* Called in timers.c - xTimerCreate */
#undef traceTIMER_CREATE
#define traceTIMER_CREATE(tmr) \
	trcKERNEL_HOOKS_TIMER_CREATE(TIMER_CREATE, tmr);

#undef traceTIMER_CREATE_FAILED
#define traceTIMER_CREATE_FAILED() \
	trcKERNEL_HOOKS_TIMER_EVENT(TIMER_CREATE_FAILED, 0);

/* Note that xCommandID can never be tmrCOMMAND_EXECUTE_CALLBACK (-1) since the trace macro is not called in that case */
#undef traceTIMER_COMMAND_SEND
#define traceTIMER_COMMAND_SEND(tmr, xCommandID, xOptionalValue, xReturn) \
	if (xCommandID > tmrCOMMAND_START_DONT_TRACE){ \
		if (xCommandID == tmrCOMMAND_CHANGE_PERIOD) vTraceStoreKernelCallWithParam((xReturn == pdPASS) ? TIMER_CHANGE_PERIOD : TIMER_CHANGE_PERIOD_FAILED, TRACE_CLASS_TIMER, TRACE_GET_TIMER_NUMBER(tmr), xOptionalValue); \
		else if ((xCommandID == tmrCOMMAND_DELETE) && (xReturn == pdPASS)){ trcKERNEL_HOOKS_TIMER_DELETE(TIMER_DELETE, tmr); } \
		else {trcKERNEL_HOOKS_TIMER_EVENT(EVENTGROUP_TIMER + xCommandID + ((xReturn == pdPASS)?0:(TIMER_CREATE_FAILED - TIMER_CREATE)), tmr); } \
	}

#undef tracePEND_FUNC_CALL
#define tracePEND_FUNC_CALL(func, arg1, arg2, ret) \
	if (ret == pdPASS) \
		vTraceStoreKernelCall(PEND_FUNC_CALL, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle()) ); \
	else \
		vTraceStoreKernelCall(PEND_FUNC_CALL_FAILED, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle()) );

#undef tracePEND_FUNC_CALL_FROM_ISR
#define tracePEND_FUNC_CALL_FROM_ISR(func, arg1, arg2, ret) \
	if (! uiInEventGroupSetBitsFromISR) vTraceStoreKernelCall(PEND_FUNC_CALL_FROM_ISR, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle()) ); \
	uiInEventGroupSetBitsFromISR = 0;
#endif

#undef traceEVENT_GROUP_CREATE
#define traceEVENT_GROUP_CREATE(eg) \
	TRACE_SET_EVENTGROUP_NUMBER(eg); \
	vTraceStoreKernelCall(EVENT_GROUP_CREATE, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg));

#undef traceEVENT_GROUP_DELETE
#define traceEVENT_GROUP_DELETE(eg) \
	vTraceStoreKernelCall(EVENT_GROUP_DELETE, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg)); \
	vTraceStoreObjectNameOnCloseEvent(TRACE_GET_EVENTGROUP_NUMBER(eg), TRACE_CLASS_EVENTGROUP); \
	vTraceStoreObjectPropertiesOnCloseEvent(TRACE_GET_EVENTGROUP_NUMBER(eg), TRACE_CLASS_EVENTGROUP); \
	vTraceFreeObjectHandle(TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg));

#undef traceEVENT_GROUP_CREATE_FAILED
#define traceEVENT_GROUP_CREATE_FAILED() \
	vTraceStoreKernelCall(EVENT_GROUP_CREATE_FAILED, TRACE_CLASS_EVENTGROUP, 0);

#undef traceEVENT_GROUP_SYNC_BLOCK
#define traceEVENT_GROUP_SYNC_BLOCK(eg, bitsToSet, bitsToWaitFor) \
	vTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_BLOCK, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor);

#undef traceEVENT_GROUP_SYNC_END
#define traceEVENT_GROUP_SYNC_END(eg, bitsToSet, bitsToWaitFor, wasTimeout) \
	if (wasTimeout){ vTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_END_FAILED, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor);} \
	else{ vTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_END, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); }

#undef traceEVENT_GROUP_WAIT_BITS_BLOCK
#define traceEVENT_GROUP_WAIT_BITS_BLOCK(eg, bitsToWaitFor) \
	vTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_BLOCK, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

#undef traceEVENT_GROUP_WAIT_BITS_END
#define traceEVENT_GROUP_WAIT_BITS_END(eg, bitsToWaitFor, wasTimeout) \
	if (wasTimeout){ vTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_END_FAILED, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); } \
	else{ vTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_END, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); }

#undef traceEVENT_GROUP_CLEAR_BITS
#define traceEVENT_GROUP_CLEAR_BITS(eg, bitsToClear) \
	if (bitsToClear) vTraceStoreKernelCallWithParam(EVENT_GROUP_CLEAR_BITS, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToClear);

#undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(eg, bitsToClear) \
	if (bitsToClear) vTraceStoreKernelCallWithParam(EVENT_GROUP_CLEAR_BITS_FROM_ISR, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToClear);

#undef traceEVENT_GROUP_SET_BITS
#define traceEVENT_GROUP_SET_BITS(eg, bitsToSet) \
	vTraceStoreKernelCallWithParam(EVENT_GROUP_SET_BITS, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToSet);

#undef traceEVENT_GROUP_SET_BITS_FROM_ISR
#define traceEVENT_GROUP_SET_BITS_FROM_ISR(eg, bitsToSet) \
	vTraceStoreKernelCallWithParam(EVENT_GROUP_SET_BITS_FROM_ISR, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToSet); \
	uiInEventGroupSetBitsFromISR = 1;

#undef traceTASK_NOTIFY_TAKE
#define traceTASK_NOTIFY_TAKE() \
	if (pxCurrentTCB->eNotifyState == eNotified) \
		vTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_TAKE, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	else \
		vTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_TAKE_FAILED, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait);

#undef traceTASK_NOTIFY_TAKE_BLOCK
#define traceTASK_NOTIFY_TAKE_BLOCK() \
	vTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_TAKE_BLOCK, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

#undef traceTASK_NOTIFY_WAIT
#define traceTASK_NOTIFY_WAIT() \
	if (pxCurrentTCB->eNotifyState == eNotified) \
		vTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	else \
		vTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_FAILED, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait);

#undef traceTASK_NOTIFY_WAIT_BLOCK
#define traceTASK_NOTIFY_WAIT_BLOCK() \
	vTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_BLOCK, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

#undef traceTASK_NOTIFY
#define traceTASK_NOTIFY() \
	vTraceStoreKernelCall(TRACE_TASK_NOTIFY, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTaskToNotify));

#undef traceTASK_NOTIFY_FROM_ISR
#define traceTASK_NOTIFY_FROM_ISR() \
	vTraceStoreKernelCall(TRACE_TASK_NOTIFY_FROM_ISR, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTaskToNotify));

#undef traceTASK_NOTIFY_GIVE_FROM_ISR
#define traceTASK_NOTIFY_GIVE_FROM_ISR() \
	vTraceStoreKernelCall(TRACE_TASK_NOTIFY_GIVE_FROM_ISR, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTaskToNotify));

/************************************************************************/
/* KERNEL SPECIFIC MACROS TO EXCLUDE OR INCLUDE THINGS IN TRACE         */
/************************************************************************/

/* Returns the exclude state of the object */
uint8_t uiTraceIsObjectExcluded(traceObjectClass objectclass, objectHandleType handle);

#define TRACE_SET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, queueIndex)
#define TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, queueIndex)
#define TRACE_GET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, queueIndex)

#define TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)
#define TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)
#define TRACE_GET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)

#define TRACE_SET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)
#define TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)
#define TRACE_GET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)

#define TRACE_SET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)
#define TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)
#define TRACE_GET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)

#define TRACE_SET_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+timerIndex)
#define TRACE_CLEAR_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+timerIndex)
#define TRACE_GET_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+timerIndex)

#define TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+NTimer+1+egIndex)
#define TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+NTimer+1+egIndex)
#define TRACE_GET_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+NTimer+1+egIndex)
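
/* A worked example of the flag layout above, using hypothetical trcConfig.h
 * values NQueue = 10, NSemaphore = 10, NMutex = 10, NTask = 10, NTimer = 4.
 * The per-class base offsets into the excludedObjects flag array become:
 *
 *    queue flags:       base 0
 *    semaphore flags:   base NQueue + 1                                   = 11
 *    mutex flags:       base NQueue+1 + NSemaphore+1                      = 22
 *    task flags:        base NQueue+1 + NSemaphore+1 + NMutex+1           = 33
 *    timer flags:       base ... + NTask + 1                              = 44
 *    event group flags: base ... + NTimer + 1                             = 49
 *
 * so, for instance, TRACE_SET_TASK_FLAG_ISEXCLUDED(5) sets the flag at
 * index 33 + 5 = 38.
 */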


#define TRACE_CLEAR_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
switch (objectclass) \
{ \
case TRACE_CLASS_QUEUE: \
	TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_SEMAPHORE: \
	TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_MUTEX: \
	TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_TASK: \
	TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_TIMER: \
	TRACE_CLEAR_TIMER_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_EVENTGROUP: \
	TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(handle); \
	break; \
}

#define TRACE_SET_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
switch (objectclass) \
{ \
case TRACE_CLASS_QUEUE: \
	TRACE_SET_QUEUE_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_SEMAPHORE: \
	TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_MUTEX: \
	TRACE_SET_MUTEX_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_TASK: \
	TRACE_SET_TASK_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_TIMER: \
	TRACE_SET_TIMER_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_EVENTGROUP: \
	TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(handle); \
	break; \
}

/* Task */
#define vTraceExcludeTaskFromTrace(handle) \
TRACE_SET_TASK_FLAG_ISEXCLUDED(TRACE_GET_TASK_NUMBER(handle));

#define vTraceIncludeTaskInTrace(handle) \
TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(TRACE_GET_TASK_NUMBER(handle));
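
/* A minimal usage sketch for the exclude/include macros in this section; the
 * task function, name and handle are made up for the example. Excluding an
 * object sets its flag so the recorder skips its events:
 *
 *    TaskHandle_t xLoggerTask;
 *    xTaskCreate(prvLoggerTask, "Logger", 256, NULL, 1, &xLoggerTask);
 *
 *    vTraceExcludeTaskFromTrace(xLoggerTask);   // stop recording this task's events
 *    // ...
 *    vTraceIncludeTaskInTrace(xLoggerTask);     // record it again later
 */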
930 \r
931 \r
932 /* Queue */\r
933 #define vTraceExcludeQueueFromTrace(handle) \\r
934 TRACE_SET_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));\r
935 \r
936 #define vTraceIncludeQueueInTrace(handle) \\r
937 TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));\r
938 \r
939 \r
940 /* Semaphore */\r
941 #define vTraceExcludeSemaphoreFromTrace(handle) \\r
942 TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));\r
943 \r
944 #define vTraceIncludeSemaphoreInTrace(handle) \\r
945 TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));\r
946 \r
947 \r
948 /* Mutex */\r
949 #define vTraceExcludeMutexFromTrace(handle) \\r
950 TRACE_SET_MUTEX_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));\r
951 \r
952 #define vTraceIncludeMutexInTrace(handle) \\r
953 TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));\r
954 \r
955 /* Timer */\r
956 #define vTraceExcludeTimerFromTrace(handle) \\r
957 TRACE_SET_TIMER_FLAG_ISEXCLUDED(TRACE_GET_TIMER_NUMBER(handle));\r
958 \r
959 #define vTraceIncludeTimerInTrace(handle) \\r
960 TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_TIMER_NUMBER(handle));\r
961 \r
962 /* Event Group */\r
963 #define vTraceExcludeEventGroupFromTrace(handle) \\r
964 TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(TRACE_GET_EVENTGROUP_NUMBER(handle));\r
965 \r
966 #define vTraceIncludeEventGroupInTrace(handle) \\r
967 TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(TRACE_GET_EVENTGROUP_NUMBER(handle));\r
968 \r
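/* Usage sketch (illustrative only; the handle name below is a hypothetical
 * application object, not part of this header). Once an object has been
 * created, it can be removed from or restored to the trace:
 *
 *     QueueHandle_t xLogQueue = xQueueCreate(8, sizeof(uint32_t));
 *     vTraceExcludeQueueFromTrace(xLogQueue);     // stop tracing this queue
 *     ...
 *     vTraceIncludeQueueInTrace(xLogQueue);       // resume tracing it
 *
 * The task, semaphore, mutex, timer and event group variants are used the
 * same way, with the corresponding FreeRTOS handle type. */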

/* Kernel Services */
#define vTraceExcludeKernelServiceDelayFromTrace() \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY_UNTIL);

#define vTraceIncludeKernelServiceDelayInTrace() \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY_UNTIL);

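/* Usage sketch (illustrative): if an application delays frequently and those
 * events are not of interest, they can be filtered out before the recorder is
 * started:
 *
 *     vTraceExcludeKernelServiceDelayFromTrace();
 *     uiTraceStart();
 *
 * uiTraceStart() is the recorder start call declared elsewhere in the recorder
 * library; it is shown here only to indicate the intended ordering. */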
/* HELPER MACROS FOR KERNEL SERVICES FOR OBJECTS */
#define vTraceExcludeKernelServiceSendFromTrace_HELPER(class) \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_SUCCESS + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_BLOCK + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FAILED + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_SUCCESS + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_FAILED + class);

#define vTraceIncludeKernelServiceSendInTrace_HELPER(class) \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_SUCCESS + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_BLOCK + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FAILED + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_SUCCESS + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_FAILED + class);

#define vTraceExcludeKernelServiceReceiveFromTrace_HELPER(class) \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_SUCCESS + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_BLOCK + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FAILED + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + class);

#define vTraceIncludeKernelServiceReceiveInTrace_HELPER(class) \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_SUCCESS + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_BLOCK + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FAILED + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + class);

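/* Note: the EVENTGROUP_*-prefixed symbols above are base event codes; adding
 * the object class (TRACE_CLASS_QUEUE, TRACE_CLASS_SEMAPHORE, ...) selects the
 * per-class variant of each send/receive event. Sketch of an expansion
 * (illustrative):
 *
 *     vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_SEMAPHORE)
 *
 * excludes EVENTGROUP_SEND_SUCCESS + TRACE_CLASS_SEMAPHORE, and likewise the
 * block/failed/from-ISR variants. */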
/* EXCLUDE AND INCLUDE FOR QUEUE */
#define vTraceExcludeKernelServiceQueueSendFromTrace() \
vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_QUEUE);

#define vTraceIncludeKernelServiceQueueSendInTrace() \
vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_QUEUE);

#define vTraceExcludeKernelServiceQueueReceiveFromTrace() \
vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_QUEUE);

#define vTraceIncludeKernelServiceQueueReceiveInTrace() \
vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_QUEUE);

/* EXCLUDE AND INCLUDE FOR SEMAPHORE */
#define vTraceExcludeKernelServiceSemaphoreSendFromTrace() \
vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_SEMAPHORE);

#define vTraceIncludeKernelServiceSemaphoreSendInTrace() \
vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_SEMAPHORE);

#define vTraceExcludeKernelServiceSemaphoreReceiveFromTrace() \
vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_SEMAPHORE);

#define vTraceIncludeKernelServiceSemaphoreReceiveInTrace() \
vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_SEMAPHORE);

/* EXCLUDE AND INCLUDE FOR MUTEX */
#define vTraceExcludeKernelServiceMutexSendFromTrace() \
vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_MUTEX);

#define vTraceIncludeKernelServiceMutexSendInTrace() \
vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_MUTEX);

#define vTraceExcludeKernelServiceMutexReceiveFromTrace() \
vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_MUTEX);

#define vTraceIncludeKernelServiceMutexReceiveInTrace() \
vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_MUTEX);

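/* Usage sketch (illustrative): to drop all semaphore give/take kernel events
 * from the trace for the whole semaphore class:
 *
 *     vTraceExcludeKernelServiceSemaphoreSendFromTrace();
 *     vTraceExcludeKernelServiceSemaphoreReceiveFromTrace();
 *
 * These wrappers filter the kernel call events for an entire object class,
 * unlike the per-object vTraceExclude<Object>FromTrace macros above. */
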
/************************************************************************/
/* KERNEL SPECIFIC MACROS TO NAME OBJECTS, IF NECESSARY                 */
/************************************************************************/
#define vTraceSetQueueName(object, name) \
vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);

#define vTraceSetSemaphoreName(object, name) \
vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);

#define vTraceSetMutexName(object, name) \
vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);

#define vTraceSetEventGroupName(object, name) \
vTraceSetObjectName(TRACE_CLASS_EVENTGROUP, (objectHandleType)uxEventGroupGetNumber(object), name);

#undef traceQUEUE_REGISTRY_ADD
#define traceQUEUE_REGISTRY_ADD(object, name) vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);
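
/* Usage sketch (illustrative; xSem is a hypothetical application handle):
 *
 *     SemaphoreHandle_t xSem = xSemaphoreCreateBinary();
 *     vTraceSetSemaphoreName(xSem, "SensorReady");
 *
 * Alternatively, queues added to the FreeRTOS queue registry with
 * vQueueAddToRegistry() are named automatically through the
 * traceQUEUE_REGISTRY_ADD hook redefined above. */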
#endif /* (USE_TRACEALYZER_RECORDER == 1) */

#endif /* TRCKERNELPORTFREERTOS_H */