]> git.sur5r.net Git - freertos/blob - FreeRTOS-Plus/Source/FreeRTOS-Plus-Trace/Include/trcKernelPort.h
Update FreeRTOS+ components and demos to use typedef names introduced in FreeRTOS V8.
[freertos] / FreeRTOS-Plus / Source / FreeRTOS-Plus-Trace / Include / trcKernelPort.h
1 /*******************************************************************************\r
2  * Tracealyzer v2.6.0 Recorder Library\r
3  * Percepio AB, www.percepio.com\r
4  *\r
5  * trcKernelPort.h\r
6  *\r
7  * Kernel-specific functionality for FreeRTOS, used by the recorder library.\r
8  * \r
9  * Terms of Use\r
10  * This software is copyright Percepio AB. The recorder library is free for\r
11  * use together with Percepio products. You may distribute the recorder library\r
12  * in its original form, including modifications in trcHardwarePort.c/.h\r
13  * given that these modification are clearly marked as your own modifications\r
14  * and documented in the initial comment section of these source files. \r
15  * This software is the intellectual property of Percepio AB and may not be \r
16  * sold or in other ways commercially redistributed without explicit written \r
17  * permission by Percepio AB.\r
18  *\r
19  * Disclaimer \r
20  * The trace tool and recorder library is being delivered to you AS IS and \r
21  * Percepio AB makes no warranty as to its use or performance. Percepio AB does \r
22  * not and cannot warrant the performance or results you may obtain by using the \r
23  * software or documentation. Percepio AB make no warranties, express or \r
24  * implied, as to noninfringement of third party rights, merchantability, or \r
25  * fitness for any particular purpose. In no event will Percepio AB, its \r
26  * technology partners, or distributors be liable to you for any consequential, \r
27  * incidental or special damages, including any lost profits or lost savings, \r
28  * even if a representative of Percepio AB has been advised of the possibility \r
29  * of such damages, or for any claim by any third party. Some jurisdictions do \r
30  * not allow the exclusion or limitation of incidental, consequential or special \r
31  * damages, or the exclusion of implied warranties or limitations on how long an \r
32  * implied warranty may last, so the above limitations may not apply to you.\r
33  *\r
34  * Copyright Percepio AB, 2013.\r
35  * www.percepio.com\r
36  ******************************************************************************/\r
37 \r
38 \r
39 #ifndef TRCKERNELPORT_H_\r
40 #define TRCKERNELPORT_H_\r
41 \r
42 #include "FreeRTOS.h"   // Defines configUSE_TRACE_FACILITY\r
43 #include "trcHardwarePort.h"\r
44 \r
45 extern int uiInEventGroupSetBitsFromISR;\r
46 \r
47 #define USE_TRACEALYZER_RECORDER configUSE_TRACE_FACILITY\r
48 \r
49 #if (USE_TRACEALYZER_RECORDER == 1)\r
50 \r
51 /* Defines that must be set for the recorder to work properly */\r
52 #define TRACE_KERNEL_VERSION 0x1AA1\r
53 #define TRACE_TICK_RATE_HZ configTICK_RATE_HZ /* Defined in "FreeRTOS.h" */\r
54 #define TRACE_CPU_CLOCK_HZ configCPU_CLOCK_HZ /* Defined in "FreeRTOSConfig.h" */\r
55 \r
56 #if ((SELECTED_PORT == PORT_ARM_CortexM) && (USE_PRIMASK_CS == 1))\r
57 \r
58 #define TRACE_SR_ALLOC_CRITICAL_SECTION() int __irq_status;\r
59 \r
60 #define TRACE_ENTER_CRITICAL_SECTION() { __irq_status = prvTraceGetIRQMask(); prvTraceDisableIRQ(); }\r
61 \r
62 #define TRACE_EXIT_CRITICAL_SECTION() { prvTraceSetIRQMask(__irq_status); }\r
63 \r
64 #define trcCRITICAL_SECTION_BEGIN_ON_CORTEX_M_ONLY trcCRITICAL_SECTION_BEGIN\r
65 #define trcCRITICAL_SECTION_END_ON_CORTEX_M_ONLY trcCRITICAL_SECTION_END\r
66 \r
67 #else\r
68 \r
69 #define TRACE_ENTER_CRITICAL_SECTION() portENTER_CRITICAL()\r
70 #define TRACE_EXIT_CRITICAL_SECTION() portEXIT_CRITICAL()\r
71 \r
72 #define trcCRITICAL_SECTION_BEGIN_ON_CORTEX_M_ONLY() recorder_busy++;\r
73 #define trcCRITICAL_SECTION_END_ON_CORTEX_M_ONLY() recorder_busy--;\r
74 \r
75 #endif\r
76 \r
77 /************************************************************************/\r
78 /* KERNEL SPECIFIC OBJECT CONFIGURATION                                 */\r
79 /************************************************************************/\r
80 #define TRACE_NCLASSES 7\r
81 #define TRACE_CLASS_QUEUE ((traceObjectClass)0)\r
82 #define TRACE_CLASS_SEMAPHORE ((traceObjectClass)1)\r
83 #define TRACE_CLASS_MUTEX ((traceObjectClass)2)\r
84 #define TRACE_CLASS_TASK ((traceObjectClass)3)\r
85 #define TRACE_CLASS_ISR ((traceObjectClass)4)\r
86 #define TRACE_CLASS_TIMER ((traceObjectClass)5)\r
87 #define TRACE_CLASS_EVENTGROUP ((traceObjectClass)6)\r
88 \r
89 #define TRACE_KERNEL_OBJECT_COUNT (NQueue + NSemaphore + NMutex + NTask + NISR + NTimer + NEventGroup)\r
90 \r
91 /* The size of the Object Property Table entries, in bytes, per object */\r
92 \r
93 /* Queue properties (except name):     current number of messages in queue */\r
94 #define PropertyTableSizeQueue         (NameLenQueue + 1)      \r
95 \r
96 /* Semaphore properties (except name): state (signaled = 1, cleared = 0) */\r
97 #define PropertyTableSizeSemaphore     (NameLenSemaphore + 1) \r
98 \r
99 /* Mutex properties (except name):     owner (task handle, 0 = free) */\r
100 #define PropertyTableSizeMutex         (NameLenMutex + 1)         \r
101 \r
102 /* Task properties (except name):      Byte 0: Current priority\r
103                                        Byte 1: state (if already active) \r
104                                        Byte 2: legacy, not used\r
105                                        Byte 3: legacy, not used */\r
106 #define PropertyTableSizeTask         (NameLenTask + 4)\r
107 \r
108 /* ISR properties:                     Byte 0: priority\r
109                                        Byte 1: state (if already active) */\r
110 #define PropertyTableSizeISR          (NameLenISR + 2)\r
111 \r
112 /* Timer properties:                   Byte 0: state (unused for now) */\r
113 #define PropertyTableSizeTimer        (NameLenTimer + 1)\r
114 \r
115 /* NEventGroup properties:            Byte 0-3: state (unused for now)*/\r
116 #define PropertyTableSizeEventGroup       (NameLenEventGroup + 4)\r
117 \r
118 \r
119 /* The layout of the byte array representing the Object Property Table */\r
120 #define StartIndexQueue            0\r
121 #define StartIndexSemaphore        StartIndexQueue     + NQueue * PropertyTableSizeQueue\r
122 #define StartIndexMutex            StartIndexSemaphore + NSemaphore * PropertyTableSizeSemaphore\r
123 #define StartIndexTask             StartIndexMutex     + NMutex * PropertyTableSizeMutex\r
124 #define StartIndexISR              StartIndexTask      + NTask * PropertyTableSizeTask\r
125 #define StartIndexTimer            StartIndexISR       + NISR * PropertyTableSizeISR\r
126 #define StartIndexEventGroup       StartIndexTimer     + NTimer * PropertyTableSizeTimer\r
127 \r
128 /* Number of bytes used by the object table */\r
129 #define TRACE_OBJECT_TABLE_SIZE    StartIndexEventGroup       + NEventGroup * PropertyTableSizeEventGroup\r
130 \r
131 /* Includes */\r
132 #include "trcConfig.h" /* Must be first, even before trcTypes.h */\r
133 #include "trcHardwarePort.h"\r
134 #include "trcTypes.h"\r
135 #include "trcKernelHooks.h"\r
136 #include "trcBase.h"\r
137 #include "trcKernel.h"\r
138 #include "trcUser.h"\r
139 \r
140 #if (INCLUDE_NEW_TIME_EVENTS == 1 && configUSE_TICKLESS_IDLE != 0)\r
141 #error "NewTime events can not be used in combination with tickless idle!"\r
142 #endif\r
143 \r
144 /* Initialization of the object property table */\r
145 void vTraceInitObjectPropertyTable(void);\r
146 \r
147 /* Initialization of the handle mechanism, see e.g, xTraceGetObjectHandle */\r
148 void vTraceInitObjectHandleStack(void);\r
149 \r
150 /* Returns the "Not enough handles" error message for the specified object class */\r
151 const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);\r
152 \r
153 /*******************************************************************************\r
154  * The event codes - should match the offline config file.\r
155  * \r
156  * Some sections below are encoded to allow for constructions like:\r
157  *\r
158  *  vTraceStoreKernelCall(EVENTGROUP_CREATE + objectclass, ...\r
159  *\r
160  * The object class ID is given by the three LSB bits, in such cases. Since each \r
161  * object class has a separate object property table, the class ID is needed to \r
162  * know what section in the object table to use for getting an object name from\r
163  * an object handle. \r
164  ******************************************************************************/\r
165 \r
166 #define NULL_EVENT                   (0x00)  /* Ignored in the analysis*/\r
167 \r
168 /*******************************************************************************\r
169  * EVENTGROUP_DIV\r
170  *\r
171  * Miscellaneous events.\r
172  ******************************************************************************/\r
173 #define EVENTGROUP_DIV               (NULL_EVENT + 1)                   /*0x01*/\r
174 #define DIV_XPS                      (EVENTGROUP_DIV + 0)               /*0x01*/\r
175 #define DIV_TASK_READY               (EVENTGROUP_DIV + 1)               /*0x02*/\r
176 #define DIV_NEW_TIME                 (EVENTGROUP_DIV + 2)               /*0x03*/\r
177 \r
178 /*******************************************************************************\r
179  * EVENTGROUP_TS\r
180  *\r
181  * Events for storing task-switches and interrupts. The RESUME events are \r
182  * generated if the task/interrupt is already marked active.\r
183  ******************************************************************************/\r
184 #define EVENTGROUP_TS                (EVENTGROUP_DIV + 3)               /*0x04*/\r
185 #define TS_ISR_BEGIN                 (EVENTGROUP_TS + 0)                /*0x04*/\r
186 #define TS_ISR_RESUME                (EVENTGROUP_TS + 1)                /*0x05*/\r
187 #define TS_TASK_BEGIN                (EVENTGROUP_TS + 2)                /*0x06*/\r
188 #define TS_TASK_RESUME               (EVENTGROUP_TS + 3)                /*0x07*/\r
189 \r
190 /*******************************************************************************\r
191  * EVENTGROUP_OBJCLOSE_NAME\r
192  * \r
193  * About Close Events\r
194  * When an object is evicted from the object property table (object close), two \r
195  * internal events are stored (EVENTGROUP_OBJCLOSE_NAME and \r
196  * EVENTGROUP_OBJCLOSE_PROP), containing the handle-name mapping and object \r
197  * properties valid up to this point.\r
198  ******************************************************************************/\r
199 #define EVENTGROUP_OBJCLOSE_NAME     (EVENTGROUP_TS + 4)                /*0x08*/\r
200 \r
201 /*******************************************************************************\r
202  * EVENTGROUP_OBJCLOSE_PROP\r
203  * \r
204  * The internal event carrying properties of deleted objects\r
205  * The handle and object class of the closed object is not stored in this event, \r
206  * but is assumed to be the same as in the preceding CLOSE event. Thus, these \r
207  * two events must be generated from within a critical section. \r
208  * When queues are closed, arg1 is the "state" property (i.e., number of \r
209  * buffered messages/signals).\r
210  * When actors are closed, arg1 is priority, arg2 is handle of the "instance \r
211  * finish" event, and arg3 is event code of the "instance finish" event. \r
212  * In this case, the lower three bits is the object class of the instance finish \r
213  * handle. The lower three bits are not used (always zero) when queues are \r
214  * closed since the queue type is given in the previous OBJCLOSE_NAME event.\r
215  ******************************************************************************/\r
216 #define EVENTGROUP_OBJCLOSE_PROP     (EVENTGROUP_OBJCLOSE_NAME + 8)     /*0x10*/\r
217 \r
218 /*******************************************************************************\r
219  * EVENTGROUP_CREATE\r
220  * \r
221  * The events in this group are used to log Kernel object creations.\r
222  * The lower three bits in the event code gives the object class, i.e., type of\r
223  * create operation (task, queue, semaphore, etc).\r
224  ******************************************************************************/\r
225 #define EVENTGROUP_CREATE_OBJ_SUCCESS    (EVENTGROUP_OBJCLOSE_PROP + 8)             /*0x18*/\r
226 \r
227 /*******************************************************************************\r
228  * EVENTGROUP_SEND\r
229  * \r
230  * The events in this group are used to log Send/Give events on queues, \r
231  * semaphores and mutexes The lower three bits in the event code gives the \r
232  * object class, i.e., what type of object that is operated on (queue, semaphore \r
233  * or mutex).\r
234  ******************************************************************************/\r
235 #define EVENTGROUP_SEND_SUCCESS      (EVENTGROUP_CREATE_OBJ_SUCCESS + 8)                    /*0x20*/\r
236 \r
237 /*******************************************************************************\r
238  * EVENTGROUP_RECEIVE\r
239  * \r
240  * The events in this group are used to log Receive/Take events on queues, \r
241  * semaphores and mutexes. The lower three bits in the event code gives the \r
242  * object class, i.e., what type of object that is operated on (queue, semaphore\r
243  * or mutex).\r
244  ******************************************************************************/\r
245 #define EVENTGROUP_RECEIVE_SUCCESS                       (EVENTGROUP_SEND_SUCCESS + 8)  /*0x28*/\r
246 \r
247 /* Send/Give operations, from ISR */\r
248 #define EVENTGROUP_SEND_FROM_ISR_SUCCESS              (EVENTGROUP_RECEIVE_SUCCESS + 8)  /*0x30*/\r
249 \r
250 /* Receive/Take operations, from ISR */\r
251 #define EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS     (EVENTGROUP_SEND_FROM_ISR_SUCCESS + 8)  /*0x38*/\r
252 \r
253 /* "Failed" event type versions of above (timeout, failed allocation, etc) */\r
254 #define EVENTGROUP_KSE_FAILED         (EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + 8) /*0x40*/\r
255 \r
256 /* Failed create calls - memory allocation failed */\r
257 #define EVENTGROUP_CREATE_OBJ_FAILED                (EVENTGROUP_KSE_FAILED) /*0x40*/\r
258 \r
259 /* Failed send/give - timeout! */\r
260 #define EVENTGROUP_SEND_FAILED           (EVENTGROUP_CREATE_OBJ_FAILED + 8) /*0x48*/\r
261 \r
262 /* Failed receive/take - timeout! */\r
263 #define EVENTGROUP_RECEIVE_FAILED          (EVENTGROUP_SEND_FAILED + 8) /*0x50*/\r
264 \r
265 /* Failed non-blocking send/give - queue full */\r
266 #define EVENTGROUP_SEND_FROM_ISR_FAILED (EVENTGROUP_RECEIVE_FAILED + 8) /*0x58*/\r
267 \r
268 /* Failed non-blocking receive/take - queue empty */\r
269 #define EVENTGROUP_RECEIVE_FROM_ISR_FAILED \\r
270                                   (EVENTGROUP_SEND_FROM_ISR_FAILED + 8) /*0x60*/\r
271 \r
272 /* Events when blocking on receive/take */\r
273 #define EVENTGROUP_RECEIVE_BLOCK \\r
274                                (EVENTGROUP_RECEIVE_FROM_ISR_FAILED + 8) /*0x68*/\r
275 \r
276 /* Events when blocking on send/give */\r
277 #define EVENTGROUP_SEND_BLOCK     (EVENTGROUP_RECEIVE_BLOCK + 8)  /*0x70*/\r
278 \r
279 /* Events on queue peek (receive) */\r
280 #define EVENTGROUP_PEEK_SUCCESS              (EVENTGROUP_SEND_BLOCK + 8)     /*0x78*/\r
281 \r
282 /* Events on object delete (vTaskDelete or vQueueDelete) */\r
283 #define EVENTGROUP_DELETE_OBJ_SUCCESS            (EVENTGROUP_PEEK_SUCCESS + 8)              /*0x80*/\r
284 \r
285 /* Other events - object class is implied: TASK */\r
286 #define EVENTGROUP_OTHERS            (EVENTGROUP_DELETE_OBJ_SUCCESS + 8)            /*0x88*/\r
287 #define TASK_DELAY_UNTIL             (EVENTGROUP_OTHERS + 0)            /*0x88*/\r
288 #define TASK_DELAY                   (EVENTGROUP_OTHERS + 1)            /*0x89*/\r
289 #define TASK_SUSPEND                 (EVENTGROUP_OTHERS + 2)            /*0x8A*/\r
290 #define TASK_RESUME                  (EVENTGROUP_OTHERS + 3)            /*0x8B*/\r
291 #define TASK_RESUME_FROM_ISR         (EVENTGROUP_OTHERS + 4)            /*0x8C*/\r
292 #define TASK_PRIORITY_SET            (EVENTGROUP_OTHERS + 5)            /*0x8D*/\r
293 #define TASK_PRIORITY_INHERIT        (EVENTGROUP_OTHERS + 6)            /*0x8E*/\r
294 #define TASK_PRIORITY_DISINHERIT     (EVENTGROUP_OTHERS + 7)            /*0x8F*/\r
295 \r
296 #define EVENTGROUP_MISC_PLACEHOLDER    (EVENTGROUP_OTHERS + 8)                  /*0x90*/\r
297 #define PEND_FUNC_CALL (EVENTGROUP_MISC_PLACEHOLDER+0)                                  /*0x90*/\r
298 #define PEND_FUNC_CALL_FROM_ISR (EVENTGROUP_MISC_PLACEHOLDER+1)                 /*0x91*/\r
299 #define PEND_FUNC_CALL_FAILED (EVENTGROUP_MISC_PLACEHOLDER+2)                   /*0x92*/\r
300 #define PEND_FUNC_CALL_FROM_ISR_FAILED (EVENTGROUP_MISC_PLACEHOLDER+3)  /*0x93*/\r
301 #define MEM_MALLOC_SIZE (EVENTGROUP_MISC_PLACEHOLDER+4)                                 /*0x94*/\r
302 #define MEM_MALLOC_ADDR (EVENTGROUP_MISC_PLACEHOLDER+5)                                 /*0x95*/\r
303 #define MEM_FREE_SIZE (EVENTGROUP_MISC_PLACEHOLDER+6)                                   /*0x96*/\r
304 #define MEM_FREE_ADDR (EVENTGROUP_MISC_PLACEHOLDER+7)                                   /*0x97*/\r
305 \r
306 /* User events */\r
307 #define EVENTGROUP_USEREVENT (EVENTGROUP_MISC_PLACEHOLDER + 8)        /*0x98*/\r
308 #define USER_EVENT (EVENTGROUP_USEREVENT + 0)\r
309 \r
310 /* Allow for 0-15 arguments (the number of args is added to event code) */\r
311 #define USER_EVENT_LAST (EVENTGROUP_USEREVENT + 15)                     /*0xA7*/\r
312 \r
313 /*******************************************************************************\r
314  * XTS Event - eXtended TimeStamp events\r
315  * The timestamps used in the recorder are "differential timestamps" (DTS), i.e.\r
316  * the time since the last stored event. The DTS fields are either 1 or 2 bytes \r
317  * in the other events, depending on the bytes available in the event struct. \r
318  * If the time since the last event (the DTS) is larger than allowed for by \r
319  * the DTS field of the current event, an XTS event is inserted immediately \r
320  * before the original event. The XTS event contains up to 3 additional bytes \r
321  * of the DTS value - the higher bytes of the true DTS value. The lower 1-2 \r
322  * bytes are stored in the normal DTS field. \r
323  * There are two types of XTS events, XTS8 and XTS16. An XTS8 event is stored \r
324  * when there is only room for 1 byte (8 bit) DTS data in the original event, \r
325  * which means a limit of 0xFF (255). The XTS16 is used when the original event \r
326  * has a 16 bit DTS field and thereby can handle values up to 0xFFFF (65535).\r
327  * \r
328  * Using a very high frequency time base can result in many XTS events. \r
329  * Preferably, the time between two OS ticks should fit in 16 bits, i.e.,\r
330  * at most 65535. If your time base has a higher frequency, you can scale\r
331  * it down with a prescaler. NOTE(review): this sentence was truncated in\r
331  * the original ("...you can define the TRACE"); the prescaler is presumably\r
331  * HWTC_DIVISOR in trcHardwarePort.h - confirm against the recorder docs.\r
332  ******************************************************************************/\r
333 \r
334 #define EVENTGROUP_SYS (EVENTGROUP_USEREVENT + 16)                      /*0xA8*/\r
335 #define XTS8 (EVENTGROUP_SYS + 0)                                       /*0xA8*/\r
336 #define XTS16 (EVENTGROUP_SYS + 1)                                      /*0xA9*/\r
337 \r
338 #define EVENT_BEING_WRITTEN (EVENTGROUP_SYS + 2)                        /*0xAA*/\r
339 \r
340 #define RESERVED_DUMMY_CODE (EVENTGROUP_SYS + 3)                        /*0xAB*/\r
341 \r
342 #define LOW_POWER_BEGIN (EVENTGROUP_SYS + 4)                                                    /*0xAC*/\r
343 #define LOW_POWER_END (EVENTGROUP_SYS + 5)                                                              /*0xAD*/\r
344 \r
345 #define XID (EVENTGROUP_SYS + 6)                                                                                /*0xAE*/\r
346 \r
347 #define XTS16L (EVENTGROUP_SYS + 7)                                                                             /*0xAF*/\r
348 \r
349 #define EVENTGROUP_TIMER (EVENTGROUP_SYS + 8)                                                   /*0xB0*/\r
350 \r
351 #define TIMER_CREATE (EVENTGROUP_TIMER + 0)                                                             /*0xB0*/\r
352 #define TIMER_START (EVENTGROUP_TIMER + 1)                                                              /*0xB1*/\r
353 #define TIMER_RESET (EVENTGROUP_TIMER + 2)                                                              /*0xB2*/\r
354 #define TIMER_STOP (EVENTGROUP_TIMER + 3)                                                               /*0xB3*/\r
355 #define TIMER_CHANGE_PERIOD (EVENTGROUP_TIMER + 4)                                              /*0xB4*/\r
356 #define TIMER_DELETE (EVENTGROUP_TIMER + 5)                                                             /*0xB5*/\r
357 #define TIMER_START_FROM_ISR (EVENTGROUP_TIMER + 6)                                             /*0xB6*/\r
358 #define TIMER_RESET_FROM_ISR (EVENTGROUP_TIMER + 7)                                             /*0xB7*/\r
359 #define TIMER_STOP_FROM_ISR (EVENTGROUP_TIMER + 8)                                              /*0xB8*/\r
360 \r
361 #define TIMER_CREATE_FAILED (EVENTGROUP_TIMER + 9)                                              /*0xB9*/\r
362 #define TIMER_START_FAILED (EVENTGROUP_TIMER + 10)                                              /*0xBA*/\r
363 #define TIMER_RESET_FAILED (EVENTGROUP_TIMER + 11)                                              /*0xBB*/\r
364 #define TIMER_STOP_FAILED (EVENTGROUP_TIMER + 12)                                               /*0xBC*/\r
365 #define TIMER_CHANGE_PERIOD_FAILED (EVENTGROUP_TIMER + 13)                              /*0xBD*/\r
366 #define TIMER_DELETE_FAILED (EVENTGROUP_TIMER + 14)                                             /*0xBE*/\r
367 #define TIMER_START_FROM_ISR_FAILED (EVENTGROUP_TIMER + 15)                             /*0xBF*/\r
368 #define TIMER_RESET_FROM_ISR_FAILED (EVENTGROUP_TIMER + 16)                             /*0xC0*/\r
369 #define TIMER_STOP_FROM_ISR_FAILED (EVENTGROUP_TIMER + 17)                              /*0xC1*/\r
370 \r
371 #define EVENTGROUP_EG (EVENTGROUP_TIMER + 18)                                                   /*0xC2*/\r
372 #define EVENT_GROUP_CREATE (EVENTGROUP_EG + 0)                                                  /*0xC2*/\r
373 #define EVENT_GROUP_CREATE_FAILED (EVENTGROUP_EG + 1)                                   /*0xC3*/\r
374 #define EVENT_GROUP_SYNC_BLOCK (EVENTGROUP_EG + 2)                                              /*0xC4*/\r
375 #define EVENT_GROUP_SYNC_END (EVENTGROUP_EG + 3)                                                /*0xC5*/\r
376 #define EVENT_GROUP_WAIT_BITS_BLOCK (EVENTGROUP_EG + 4)                                 /*0xC6*/\r
377 #define EVENT_GROUP_WAIT_BITS_END (EVENTGROUP_EG + 5)                                   /*0xC7*/\r
378 #define EVENT_GROUP_CLEAR_BITS (EVENTGROUP_EG + 6)                                              /*0xC8*/\r
379 #define EVENT_GROUP_CLEAR_BITS_FROM_ISR (EVENTGROUP_EG + 7)                             /*0xC9*/\r
380 #define EVENT_GROUP_SET_BITS (EVENTGROUP_EG + 8)                                                /*0xCA*/\r
381 #define EVENT_GROUP_DELETE (EVENTGROUP_EG + 9)                                                  /*0xCB*/\r
382 #define EVENT_GROUP_SYNC_END_FAILED (EVENTGROUP_EG + 10)                                /*0xCC*/\r
383 #define EVENT_GROUP_WAIT_BITS_END_FAILED (EVENTGROUP_EG + 11)                   /*0xCD*/\r
384 #define EVENT_GROUP_SET_BITS_FROM_ISR  (EVENTGROUP_EG + 12)             /*0xCE*/\r
385 #define EVENT_GROUP_SET_BITS_FROM_ISR_FAILED (EVENTGROUP_EG + 13)               /*0xCF*/\r
386 \r
387 /************************************************************************/\r
388 /* KERNEL SPECIFIC DATA AND FUNCTIONS NEEDED TO PROVIDE THE             */\r
389 /* FUNCTIONALITY REQUESTED BY THE TRACE RECORDER                        */\r
390 /************************************************************************/\r
391 \r
392 /******************************************************************************\r
393  * TraceObjectClassTable\r
394  * Translates a FreeRTOS QueueType into trace objects classes (TRACE_CLASS_).\r
395  * This was added since we want to map both types of Mutex and both types of \r
396  * Semaphores on common classes for all Mutexes and all Semaphores respectively. \r
397  * \r
398  * FreeRTOS Queue types\r
399  * #define queueQUEUE_TYPE_BASE                  (0U) => TRACE_CLASS_QUEUE\r
400  * #define queueQUEUE_TYPE_MUTEX                 (1U) => TRACE_CLASS_MUTEX\r
401  * #define queueQUEUE_TYPE_COUNTING_SEMAPHORE    (2U) => TRACE_CLASS_SEMAPHORE\r
402  * #define queueQUEUE_TYPE_BINARY_SEMAPHORE      (3U) => TRACE_CLASS_SEMAPHORE\r
403  * #define queueQUEUE_TYPE_RECURSIVE_MUTEX       (4U) => TRACE_CLASS_MUTEX \r
404  ******************************************************************************/\r
405 \r
406 extern traceObjectClass TraceObjectClassTable[5];\r
407 \r
408 /* These functions are implemented in the .c file since certain header files must not be included in this one */\r
409 objectHandleType prvTraceGetObjectNumber(void* handle);\r
410 unsigned char prvTraceGetObjectType(void* handle);\r
411 objectHandleType prvTraceGetTaskNumber(void* handle);\r
412 unsigned char prvTraceIsSchedulerActive(void);\r
413 unsigned char prvTraceIsSchedulerSuspended(void);\r
414 unsigned char prvTraceIsSchedulerStarted(void);\r
415 void* prvTraceGetCurrentTaskHandle(void);\r
416 \r
417 #if (configUSE_TIMERS == 1)\r
418 #undef INCLUDE_xTimerGetTimerDaemonTaskHandle\r
419 #define INCLUDE_xTimerGetTimerDaemonTaskHandle 1\r
420 #endif\r
421 \r
422 /************************************************************************/\r
423 /* KERNEL SPECIFIC MACROS USED BY THE TRACE RECORDER                    */\r
424 /************************************************************************/\r
425 \r
426 #define TRACE_MALLOC(size) pvPortMalloc(size)\r
427 #define TRACE_IS_SCHEDULER_ACTIVE() prvTraceIsSchedulerActive()\r
428 #define TRACE_IS_SCHEDULER_STARTED() prvTraceIsSchedulerStarted()\r
429 #define TRACE_IS_SCHEDULER_SUSPENDED() prvTraceIsSchedulerSuspended()\r
430 #define TRACE_GET_CURRENT_TASK() prvTraceGetCurrentTaskHandle()\r
431 \r
432 #define TRACE_GET_TASK_PRIORITY(pxTCB) ((uint8_t)pxTCB->uxPriority)\r
433 #define TRACE_GET_TASK_NAME(pxTCB) ((char*)pxTCB->pcTaskName)\r
434 #define TRACE_GET_TASK_NUMBER(pxTCB) (prvTraceGetTaskNumber(pxTCB))\r
435 #define TRACE_SET_TASK_NUMBER(pxTCB) pxTCB->uxTaskNumber = xTraceGetObjectHandle(TRACE_CLASS_TASK);\r
436 \r
437 #define TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass) TraceObjectClassTable[kernelClass]\r
438 #define TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject) TRACE_GET_CLASS_TRACE_CLASS(CLASS, prvTraceGetObjectType(pxObject))\r
439 \r
440 #define TRACE_GET_TIMER_NUMBER(tmr) ( ( objectHandleType ) ((Timer_t*)tmr)->uxTimerNumber )\r
441 #define TRACE_SET_TIMER_NUMBER(tmr) ((Timer_t*)tmr)->uxTimerNumber = xTraceGetObjectHandle(TRACE_CLASS_TIMER);\r
442 #define TRACE_GET_TIMER_NAME(pxTimer) pxTimer->pcTimerName\r
443 #define TRACE_GET_TIMER_PERIOD(pxTimer) pxTimer->xTimerPeriodInTicks\r
444 \r
445 #define TRACE_GET_EVENTGROUP_NUMBER(eg)  ( ( objectHandleType ) uxEventGroupGetNumber(eg) )\r
446 #define TRACE_SET_EVENTGROUP_NUMBER(eg) ((EventGroup_t*)eg)->uxEventGroupNumber = xTraceGetObjectHandle(TRACE_CLASS_EVENTGROUP);\r
447 \r
448 #define TRACE_GET_OBJECT_NUMBER(CLASS, pxObject) (prvTraceGetObjectNumber(pxObject))\r
449 #define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) pxObject->uxQueueNumber = xTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject));\r
450 \r
451 #define TRACE_GET_CLASS_EVENT_CODE(SERVICE, RESULT, CLASS, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass))\r
452 #define TRACE_GET_OBJECT_EVENT_CODE(SERVICE, RESULT, CLASS, pxObject) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject))\r
453 #define TRACE_GET_TASK_EVENT_CODE(SERVICE, RESULT, CLASS, pxTCB) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_CLASS_TASK)\r
454 \r
455 /************************************************************************/\r
456 /* KERNEL SPECIFIC WRAPPERS THAT SHOULD BE CALLED BY THE KERNEL         */\r
457 /************************************************************************/\r
458 \r
459 #if (configUSE_TICKLESS_IDLE != 0)\r
460 \r
461 #undef traceLOW_POWER_IDLE_BEGIN\r
462 #define traceLOW_POWER_IDLE_BEGIN() \\r
463         { \\r
464                 extern uint32_t trace_disable_timestamp; \\r
465                 vTraceStoreLowPower(0); \\r
466                 trace_disable_timestamp = 1; \\r
467         }       \r
468 \r
469 #undef traceLOW_POWER_IDLE_END\r
470 #define traceLOW_POWER_IDLE_END() \\r
471         { \\r
472                 extern uint32_t trace_disable_timestamp; \\r
473                 trace_disable_timestamp = 0; \\r
474                 vTraceStoreLowPower(1); \\r
475         }\r
476 \r
477 #endif\r
478 \r
479 /* A macro that will update the tick count when returning from tickless idle */\r
480 #undef traceINCREASE_TICK_COUNT\r
481 /* Note: This can handle time adjustments of max 2^32 ticks, i.e., 35 seconds at 120 MHz. Thus, tick-less idle periods longer than 2^32 ticks  will appear "compressed" on the time line.*/\r
482 #define traceINCREASE_TICK_COUNT( xCount ) { DWT_CYCLES_ADDED += (xCount * (TRACE_CPU_CLOCK_HZ / TRACE_TICK_RATE_HZ)); }\r
483 \r
484 /* Called for each task that becomes ready */\r
485 #undef traceMOVED_TASK_TO_READY_STATE\r
486 #define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \\r
487         trcKERNEL_HOOKS_MOVED_TASK_TO_READY_STATE(pxTCB);\r
488         \r
489 /* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. */\r
490 #undef traceTASK_INCREMENT_TICK\r
491 #define traceTASK_INCREMENT_TICK( xTickCount ) \\r
492     if (uxSchedulerSuspended == ( UBaseType_t ) pdTRUE || uxPendedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \\r
493         if (uxSchedulerSuspended == ( UBaseType_t ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); }\r
494 \r
495 /* Called on each task-switch */\r
496 #undef traceTASK_SWITCHED_IN\r
497 #define traceTASK_SWITCHED_IN() \\r
498         trcKERNEL_HOOKS_TASK_SWITCH(TRACE_GET_CURRENT_TASK());\r
499 \r
500 /* Called on vTaskSuspend */\r
501 #undef traceTASK_SUSPEND\r
502 #define traceTASK_SUSPEND( pxTaskToSuspend ) \\r
503         trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend);    \r
504 \r
505 /* Called on vTaskDelay - note the use of FreeRTOS variable xTicksToDelay */\r
506 #undef traceTASK_DELAY\r
507 #define traceTASK_DELAY() \\r
508         trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY, pxCurrentTCB, xTicksToDelay); \\r
509         trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();   \r
510 \r
511 /* Called on vTaskDelayUntil - note the use of FreeRTOS variable xTimeToWake */\r
512 #undef traceTASK_DELAY_UNTIL\r
513 #define traceTASK_DELAY_UNTIL() \\r
514         trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY_UNTIL, pxCurrentTCB, xTimeToWake); \\r
515         trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();\r
516 \r
#if (INCLUDE_OBJECT_DELETE == 1)
/* Called on vTaskDelete. The delete hook is wrapped in a critical section,
 * presumably to keep the recorder's object table consistent while the
 * object handle is being released — TODO confirm against trcKernelHooks. */
#undef traceTASK_DELETE
#define traceTASK_DELETE( pxTaskToDelete ) \
        { TRACE_SR_ALLOC_CRITICAL_SECTION(); \
        TRACE_ENTER_CRITICAL_SECTION(); \
        trcKERNEL_HOOKS_TASK_DELETE(DELETE_OBJ, pxTaskToDelete); \
        TRACE_EXIT_CRITICAL_SECTION(); }
#endif

#if (INCLUDE_OBJECT_DELETE == 1)
/* Called on vQueueDelete; same critical-section pattern as traceTASK_DELETE. */
#undef traceQUEUE_DELETE
#define traceQUEUE_DELETE( pxQueue ) \
        { TRACE_SR_ALLOC_CRITICAL_SECTION(); \
        TRACE_ENTER_CRITICAL_SECTION(); \
        trcKERNEL_HOOKS_OBJECT_DELETE(DELETE_OBJ, UNUSED, pxQueue); \
        TRACE_EXIT_CRITICAL_SECTION(); }
#endif
536 \r
/* Called on vTaskCreate; registers the new task with the recorder.
 * The NULL check guards against a failed TCB allocation. */
#undef traceTASK_CREATE
#define traceTASK_CREATE(pxNewTCB) \
        if (pxNewTCB != NULL) \
        { \
                trcKERNEL_HOOKS_TASK_CREATE(CREATE_OBJ, UNUSED, pxNewTCB); \
        }

/* Called in vTaskCreate, if it fails (typically if the stack can not be allocated) */
#undef traceTASK_CREATE_FAILED
#define traceTASK_CREATE_FAILED() \
        trcKERNEL_HOOKS_TASK_CREATE_FAILED(CREATE_OBJ, UNUSED);

/* Called in xQueueCreate, and thereby for all other object based on queues, such as semaphores. */
#undef traceQUEUE_CREATE
#define traceQUEUE_CREATE( pxNewQueue )\
        trcKERNEL_HOOKS_OBJECT_CREATE(CREATE_OBJ, UNUSED, pxNewQueue);

/* Called in xQueueCreate, if the queue creation fails */
#undef traceQUEUE_CREATE_FAILED
#define traceQUEUE_CREATE_FAILED( queueType ) \
        trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE_OBJ, UNUSED, queueType);

/* Called in xQueueCreateMutex, and thereby also from xSemaphoreCreateMutex and xSemaphoreCreateRecursiveMutex */
#undef traceCREATE_MUTEX
#define traceCREATE_MUTEX( pxNewQueue ) \
        trcKERNEL_HOOKS_OBJECT_CREATE(CREATE_OBJ, UNUSED, pxNewQueue);

/* Called in xQueueCreateMutex when the operation fails (when memory allocation fails) */
#undef traceCREATE_MUTEX_FAILED
#define traceCREATE_MUTEX_FAILED() \
        trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE_OBJ, UNUSED, queueQUEUE_TYPE_MUTEX);

/* Called when the Mutex can not be given, since not holder */
#undef traceGIVE_MUTEX_RECURSIVE_FAILED
#define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ) \
        trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxMutex);
574 \r
/* Called when a message is sent to a queue. Also updates the recorded object
 * state: for a mutex the state becomes 0 (released), otherwise the new
 * message count (uxMessagesWaiting is incremented after this hook runs). */
#undef traceQUEUE_SEND
#define traceQUEUE_SEND( pxQueue ) \
        trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, SUCCESS, UNUSED, pxQueue); \
        trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? (uint8_t)0 : (uint8_t)(pxQueue->uxMessagesWaiting + 1));

/* Called when a message failed to be sent to a queue (timeout) */
#undef traceQUEUE_SEND_FAILED
#define traceQUEUE_SEND_FAILED( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxQueue);

/* Called when the task is blocked due to a send operation on a full queue */
#undef traceBLOCKING_ON_QUEUE_SEND
#define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \
        trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, BLOCK, UNUSED, pxQueue);

/* Called when a message is received from a queue. For a mutex the recorded
 * state becomes the taking task's number (the new holder); otherwise the
 * decremented message count. */
#undef traceQUEUE_RECEIVE
#define traceQUEUE_RECEIVE( pxQueue ) \
        trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, SUCCESS, UNUSED, pxQueue); \
        trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? TRACE_GET_TASK_NUMBER(TRACE_GET_CURRENT_TASK()) : (uint8_t)(pxQueue->uxMessagesWaiting - 1));

/* Called when a receive operation on a queue fails (timeout) */
#undef traceQUEUE_RECEIVE_FAILED
#define traceQUEUE_RECEIVE_FAILED( pxQueue ) \
        trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, FAILED, UNUSED, pxQueue);

/* Called when the task is blocked due to a receive operation on an empty
 * queue. Blocking on a mutex is NOT treated as end-of-instance. */
#undef traceBLOCKING_ON_QUEUE_RECEIVE
#define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \
        trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, BLOCK, UNUSED, pxQueue); \
        if (TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) != TRACE_CLASS_MUTEX) \
        {trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();}

/* Called on xQueuePeek */
#undef traceQUEUE_PEEK
#define traceQUEUE_PEEK( pxQueue ) \
        trcKERNEL_HOOKS_KERNEL_SERVICE(PEEK, SUCCESS, UNUSED, pxQueue);
613 \r
/* Called when a message is sent from interrupt context, e.g., using
 * xQueueSendFromISR. Also records the incremented message count. */
#undef traceQUEUE_SEND_FROM_ISR
#define traceQUEUE_SEND_FROM_ISR( pxQueue ) \
        trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, SUCCESS, UNUSED, pxQueue); \
        trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting + 1));

/* Called when a message send from interrupt context fails (since the queue was full) */
#undef traceQUEUE_SEND_FROM_ISR_FAILED
#define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \
        trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, FAILED, UNUSED, pxQueue);

/* Called when a message is received in interrupt context, e.g., using
 * xQueueReceiveFromISR. Also records the decremented message count. */
#undef traceQUEUE_RECEIVE_FROM_ISR
#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \
        trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, SUCCESS, UNUSED, pxQueue); \
        trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting - 1));

/* Called when a message receive from interrupt context fails (since the queue was empty) */
#undef traceQUEUE_RECEIVE_FROM_ISR_FAILED
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \
        trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, FAILED, UNUSED, pxQueue);
635         \r
/* Called in vTaskPrioritySet; records the task's new priority. */
#undef traceTASK_PRIORITY_SET
#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \
        trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_SET, pxTask, uxNewPriority);

/* Called in vTaskPriorityInherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_INHERIT
#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \
        trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_INHERIT, pxTask, uxNewPriority);

/* Called in vTaskPriorityDisinherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_DISINHERIT
#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \
        trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_DISINHERIT, pxTask, uxNewPriority);

/* Called in vTaskResume */
#undef traceTASK_RESUME
#define traceTASK_RESUME( pxTaskToResume ) \
        trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME, pxTaskToResume);

/* Called in vTaskResumeFromISR */
#undef traceTASK_RESUME_FROM_ISR
#define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \
        trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME_FROM_ISR, pxTaskToResume);
660         \r
661 \r
/* Called in timer.c - xTimerCreate; registers the new software timer. */
#undef traceTIMER_CREATE
#define traceTIMER_CREATE(tmr) \
        trcKERNEL_HOOKS_TIMER_CREATE(TIMER_CREATE, tmr);

/* Called in timer.c - xTimerCreate, if the timer allocation fails. */
#undef traceTIMER_CREATE_FAILED
#define traceTIMER_CREATE_FAILED() \
        trcKERNEL_HOOKS_TIMER_EVENT(TIMER_CREATE_FAILED, 0);

/* Note that xCommandID can never be tmrCOMMAND_EXECUTE_CALLBACK (-1) since the trace macro is not called in that case.
 * Change-period events carry the new period as parameter; deletion frees the
 * timer's trace handle; all other commands map onto the EVENTGROUP_TIMER
 * range, with an offset selecting the "failed" variant when xReturn != pdPASS. */
#undef traceTIMER_COMMAND_SEND
#define traceTIMER_COMMAND_SEND(tmr, xCommandID, xOptionalValue, xReturn) \
if (xCommandID > tmrCOMMAND_START_DONT_TRACE){\
                if (xCommandID == tmrCOMMAND_CHANGE_PERIOD) vTraceStoreKernelCallWithParam((xReturn == pdPASS) ? TIMER_CHANGE_PERIOD : TIMER_CHANGE_PERIOD_FAILED, TRACE_CLASS_TIMER, TRACE_GET_TIMER_NUMBER(tmr), xOptionalValue);\
                else if ((xCommandID == tmrCOMMAND_DELETE) && (xReturn == pdPASS)){ trcKERNEL_HOOKS_TIMER_DELETE(TIMER_DELETE, tmr); } \
                else {trcKERNEL_HOOKS_TIMER_EVENT(EVENTGROUP_TIMER + xCommandID + ((xReturn == pdPASS)?0:(TIMER_CREATE_FAILED - TIMER_CREATE)), tmr); }\
}
679 \r
/* Called in xTimerPendFunctionCall; the event is attributed to the timer
 * daemon task, which will execute the pended function. */
#undef tracePEND_FUNC_CALL
#define tracePEND_FUNC_CALL(func, arg1, arg2, ret) \
if (ret == pdPASS) \
        vTraceStoreKernelCall(PEND_FUNC_CALL, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle()) ); \
else \
        vTraceStoreKernelCall(PEND_FUNC_CALL_FAILED, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle()) );

/* Called in xTimerPendFunctionCallFromISR. The event is suppressed when the
 * call originates from xEventGroupSetBitsFromISR (flag set by
 * traceEVENT_GROUP_SET_BITS_FROM_ISR below); the flag is cleared here. */
#undef tracePEND_FUNC_CALL_FROM_ISR
#define tracePEND_FUNC_CALL_FROM_ISR(func, arg1, arg2, ret) \
        if (! uiInEventGroupSetBitsFromISR) vTraceStoreKernelCall(PEND_FUNC_CALL_FROM_ISR, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle()) ); \
        uiInEventGroupSetBitsFromISR = 0;
691 \r
/* Called in xEventGroupCreate; assigns a trace handle to the new event group
 * and stores the create event. */
#undef traceEVENT_GROUP_CREATE
#define traceEVENT_GROUP_CREATE(eg) \
        TRACE_SET_EVENTGROUP_NUMBER(eg); \
        vTraceStoreKernelCall(EVENT_GROUP_CREATE, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg));

/* Called in vEventGroupDelete; stores the delete event, saves the name and
 * properties for offline display, then releases the trace handle. */
#undef traceEVENT_GROUP_DELETE
#define traceEVENT_GROUP_DELETE(eg) \
        vTraceStoreKernelCall(EVENT_GROUP_DELETE, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg)); \
        vTraceStoreObjectNameOnCloseEvent(TRACE_GET_EVENTGROUP_NUMBER(eg), TRACE_CLASS_EVENTGROUP); \
        vTraceStoreObjectPropertiesOnCloseEvent(TRACE_GET_EVENTGROUP_NUMBER(eg), TRACE_CLASS_EVENTGROUP); \
        vTraceFreeObjectHandle(TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg));

/* Called in xEventGroupCreate, if the allocation fails. */
#undef traceEVENT_GROUP_CREATE_FAILED
#define traceEVENT_GROUP_CREATE_FAILED() \
        vTraceStoreKernelCall(EVENT_GROUP_CREATE_FAILED, TRACE_CLASS_EVENTGROUP, 0);

/* Called when a task blocks in xEventGroupSync; the awaited bits are stored
 * as the event parameter. */
#undef traceEVENT_GROUP_SYNC_BLOCK
#define traceEVENT_GROUP_SYNC_BLOCK(eg, bitsToSet, bitsToWaitFor) \
        vTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_BLOCK, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor);

/* Called when xEventGroupSync returns; a timeout is stored as the FAILED variant. */
#undef traceEVENT_GROUP_SYNC_END
#define traceEVENT_GROUP_SYNC_END(eg, bitsToSet, bitsToWaitFor, wasTimeout) \
        if (wasTimeout){ vTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_END_FAILED, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor);} \
        else{ vTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_END, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); }

/* Called when a task blocks in xEventGroupWaitBits; also ends the current
 * task instance. */
#undef traceEVENT_GROUP_WAIT_BITS_BLOCK
#define traceEVENT_GROUP_WAIT_BITS_BLOCK(eg, bitsToWaitFor) \
        vTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_BLOCK, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); \
        trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

/* Called when xEventGroupWaitBits returns; a timeout is stored as the FAILED variant. */
#undef traceEVENT_GROUP_WAIT_BITS_END
#define traceEVENT_GROUP_WAIT_BITS_END(eg, bitsToWaitFor, wasTimeout) \
        if (wasTimeout){ vTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_END_FAILED, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); } \
        else{ vTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_END, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); }

/* Called in xEventGroupClearBits; no event is stored when bitsToClear is 0. */
#undef traceEVENT_GROUP_CLEAR_BITS
#define traceEVENT_GROUP_CLEAR_BITS(eg, bitsToClear) \
        if (bitsToClear) vTraceStoreKernelCallWithParam(EVENT_GROUP_CLEAR_BITS, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToClear);

/* ISR variant of the above; same zero-bits suppression. */
#undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(eg, bitsToClear) \
        if (bitsToClear) vTraceStoreKernelCallWithParam(EVENT_GROUP_CLEAR_BITS_FROM_ISR, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToClear);

/* Called in xEventGroupSetBits. */
#undef traceEVENT_GROUP_SET_BITS
#define traceEVENT_GROUP_SET_BITS(eg, bitsToSet) \
        vTraceStoreKernelCallWithParam(EVENT_GROUP_SET_BITS, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToSet);

/* Called in xEventGroupSetBitsFromISR. Sets the flag that suppresses the
 * follow-up tracePEND_FUNC_CALL_FROM_ISR event (see above), since that pend
 * is an implementation detail of the set-bits call. */
#undef traceEVENT_GROUP_SET_BITS_FROM_ISR
#define traceEVENT_GROUP_SET_BITS_FROM_ISR(eg, bitsToSet) \
        vTraceStoreKernelCallWithParam(EVENT_GROUP_SET_BITS_FROM_ISR, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToSet); \
        uiInEventGroupSetBitsFromISR = 1;
743         \r
#if (INCLUDE_MEMMANG_EVENTS == 1)

/* Stores a heap allocation/free event (implemented in the recorder library). */
extern void vTraceStoreMemMangEvent(uint32_t ecode, uint32_t address, uint32_t size);

/* Called by pvPortMalloc; records the allocation and updates the tracked heap usage. */
#undef traceMALLOC
#define traceMALLOC( pvAddress, uiSize ) {vTraceStoreMemMangEvent(MEM_MALLOC_SIZE, ( uint32_t ) pvAddress, uiSize); TRACE_INCR_HEAP_USAGE(uiSize);}

/* Called by vPortFree; records the free and updates the tracked heap usage. */
#undef traceFREE
#define traceFREE( pvAddress, uiSize ) {vTraceStoreMemMangEvent(MEM_FREE_SIZE, ( uint32_t ) pvAddress, uiSize); TRACE_DECR_HEAP_USAGE(uiSize);}

#endif
756 \r
757 /************************************************************************/\r
758 /* KERNEL SPECIFIC MACROS TO EXCLUDE OR INCLUDE THINGS IN TRACE         */\r
759 /************************************************************************/\r
760 \r
/* Returns the exclude state of the object */
uint8_t uiTraceIsObjectExcluded(traceObjectClass objectclass, objectHandleType handle);

/* The per-object exclusion flags are packed into a single bit array
 * (excludedObjects). Each object class occupies a contiguous region, offset
 * by the capacities of the preceding classes (+1 each, as visible in the
 * offsets below): queues first, then semaphores, mutexes, tasks, timers and
 * event groups. */
#define TRACE_SET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, queueIndex)
#define TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, queueIndex)
#define TRACE_GET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, queueIndex)

#define TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)
#define TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)
#define TRACE_GET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)

#define TRACE_SET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)
#define TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)
#define TRACE_GET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)

#define TRACE_SET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)
#define TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)
#define TRACE_GET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)

#define TRACE_SET_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+timerIndex)
#define TRACE_CLEAR_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+timerIndex)
#define TRACE_GET_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+timerIndex)

#define TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+NTimer+1+egIndex)
#define TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+NTimer+1+egIndex)
#define TRACE_GET_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+NTimer+1+egIndex)
787 \r
788 \r
/* Clears the exclusion flag for the given object, dispatching on its trace
 * class to compute the correct bit index. The added default case makes
 * unknown classes an explicit no-op and silences -Wswitch warnings for
 * traceObjectClass values not handled here. */
#define TRACE_CLEAR_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
switch (objectclass) \
{ \
case TRACE_CLASS_QUEUE: \
        TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(handle); \
        break; \
case TRACE_CLASS_SEMAPHORE: \
        TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(handle); \
        break; \
case TRACE_CLASS_MUTEX: \
        TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(handle); \
        break; \
case TRACE_CLASS_TASK: \
        TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(handle); \
        break; \
case TRACE_CLASS_TIMER: \
        TRACE_CLEAR_TIMER_FLAG_ISEXCLUDED(handle); \
        break; \
case TRACE_CLASS_EVENTGROUP: \
        TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(handle); \
        break; \
default: \
        break; \
}
811 \r
/* Sets the exclusion flag for the given object, dispatching on its trace
 * class to compute the correct bit index. The added default case makes
 * unknown classes an explicit no-op and silences -Wswitch warnings for
 * traceObjectClass values not handled here. */
#define TRACE_SET_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
switch (objectclass) \
{ \
case TRACE_CLASS_QUEUE: \
        TRACE_SET_QUEUE_FLAG_ISEXCLUDED(handle); \
        break; \
case TRACE_CLASS_SEMAPHORE: \
        TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(handle); \
        break; \
case TRACE_CLASS_MUTEX: \
        TRACE_SET_MUTEX_FLAG_ISEXCLUDED(handle); \
        break; \
case TRACE_CLASS_TASK: \
        TRACE_SET_TASK_FLAG_ISEXCLUDED(handle); \
        break; \
case TRACE_CLASS_TIMER: \
        TRACE_SET_TIMER_FLAG_ISEXCLUDED(handle); \
        break; \
case TRACE_CLASS_EVENTGROUP: \
        TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(handle); \
        break; \
default: \
        break; \
}
834 \r
/* Task: exclude/include a specific task from the trace by toggling its
 * exclusion flag. */
#define vTraceExcludeTaskFromTrace(handle) \
TRACE_SET_TASK_FLAG_ISEXCLUDED(TRACE_GET_TASK_NUMBER(handle));

#define vTraceIncludeTaskInTrace(handle) \
TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(TRACE_GET_TASK_NUMBER(handle));


/* Queue: exclude/include a specific queue from the trace. */
#define vTraceExcludeQueueFromTrace(handle) \
TRACE_SET_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

#define vTraceIncludeQueueInTrace(handle) \
TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));
849 \r
850 \r
/* Semaphore: exclude/include a specific semaphore from the trace.
 * FIX: vTraceIncludeSemaphoreInTrace previously cleared the QUEUE flag
 * (copy-paste defect), so it re-included the wrong object and left the
 * semaphore excluded. It must clear the SEMAPHORE flag set by the
 * exclude macro above. */
#define vTraceExcludeSemaphoreFromTrace(handle) \
TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

#define vTraceIncludeSemaphoreInTrace(handle) \
TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));
857 \r
858 \r
/* Mutex: exclude/include a specific mutex from the trace.
 * FIX: vTraceIncludeMutexInTrace previously cleared the QUEUE flag
 * (copy-paste defect); it must clear the MUTEX flag set by the exclude
 * macro above. */
#define vTraceExcludeMutexFromTrace(handle) \
TRACE_SET_MUTEX_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

#define vTraceIncludeMutexInTrace(handle) \
TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));
865 \r
/* Timer: exclude/include a specific software timer from the trace.
 * FIX: vTraceIncludeTimerInTrace previously cleared the QUEUE flag
 * (copy-paste defect); it must clear the TIMER flag set by the exclude
 * macro above. */
#define vTraceExcludeTimerFromTrace(handle) \
TRACE_SET_TIMER_FLAG_ISEXCLUDED(TRACE_GET_TIMER_NUMBER(handle));

#define vTraceIncludeTimerInTrace(handle) \
TRACE_CLEAR_TIMER_FLAG_ISEXCLUDED(TRACE_GET_TIMER_NUMBER(handle));
872 \r
/* Event Group: exclude/include a specific event group from the trace. */
#define vTraceExcludeEventGroupFromTrace(handle) \
TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(TRACE_GET_EVENTGROUP_NUMBER(handle));

#define vTraceIncludeEventGroupInTrace(handle) \
TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(TRACE_GET_EVENTGROUP_NUMBER(handle));


/* Kernel Services: exclude/include the task-delay event codes (both
 * TASK_DELAY and TASK_DELAY_UNTIL) from the trace. */
#define vTraceExcludeKernelServiceDelayFromTrace() \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY_UNTIL);

#define vTraceIncludeKernelServiceDelayInTrace() \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY_UNTIL);
889 \r
/* HELPER MACROS FOR KERNEL SERVICES FOR OBJECTS.
 * "class" is a TRACE_CLASS_* value used as an offset into the per-class
 * event-code groups, covering all success/block/failed and from-ISR
 * variants of the send and receive services. */
#define vTraceExcludeKernelServiceSendFromTrace_HELPER(class) \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_SUCCESS + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_BLOCK + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FAILED + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_SUCCESS + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_FAILED + class);

#define vTraceIncludeKernelServiceSendInTrace_HELPER(class) \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_SUCCESS + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_BLOCK + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FAILED + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_SUCCESS + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_FAILED + class);

#define vTraceExcludeKernelServiceReceiveFromTrace_HELPER(class) \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_SUCCESS + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_BLOCK + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FAILED + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + class);

#define vTraceIncludeKernelServiceReceiveInTrace_HELPER(class) \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_SUCCESS + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_BLOCK + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FAILED + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + class);
918 \r
/* EXCLUDE AND INCLUDE FOR QUEUE */
#define vTraceExcludeKernelServiceQueueSendFromTrace() \
vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_QUEUE);

#define vTraceIncludeKernelServiceQueueSendInTrace() \
vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_QUEUE);

#define vTraceExcludeKernelServiceQueueReceiveFromTrace() \
vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_QUEUE);

#define vTraceIncludeKernelServiceQueueReceiveInTrace() \
vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_QUEUE);

/* EXCLUDE AND INCLUDE FOR SEMAPHORE */
#define vTraceExcludeKernelServiceSemaphoreSendFromTrace() \
vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_SEMAPHORE);

/* Misspelled name ("Servic") kept for backward compatibility with any
 * existing callers. */
#define vTraceIncludeKernelServicSemaphoreSendInTrace() \
vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_SEMAPHORE);

/* Correctly-spelled alias for the macro above, matching the naming of the
 * other vTraceIncludeKernelService* macros. */
#define vTraceIncludeKernelServiceSemaphoreSendInTrace() \
vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_SEMAPHORE);

#define vTraceExcludeKernelServiceSemaphoreReceiveFromTrace() \
vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_SEMAPHORE);

#define vTraceIncludeKernelServiceSemaphoreReceiveInTrace() \
vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_SEMAPHORE);

/* EXCLUDE AND INCLUDE FOR MUTEX */
#define vTraceExcludeKernelServiceMutexSendFromTrace() \
vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_MUTEX);

#define vTraceIncludeKernelServiceMutexSendInTrace() \
vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_MUTEX);

#define vTraceExcludeKernelServiceMutexReceiveFromTrace() \
vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_MUTEX);

#define vTraceIncludeKernelServiceMutexReceiveInTrace() \
vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_MUTEX);
957 \r
/************************************************************************/
/* KERNEL SPECIFIC MACROS TO NAME OBJECTS, IF NECESSARY                 */
/************************************************************************/
/* Assigns a display name to a queue; the trace class and object number are
 * derived from the handle. */
#define vTraceSetQueueName(object, name) \
vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);

/* Assigns a display name to a semaphore. */
#define vTraceSetSemaphoreName(object, name) \
vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);

/* Assigns a display name to a mutex. */
#define vTraceSetMutexName(object, name) \
vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);

/* Assigns a display name to an event group (numbered via uxEventGroupGetNumber). */
#define vTraceSetEventGroupName(object, name) \
vTraceSetObjectName(TRACE_CLASS_EVENTGROUP, uxEventGroupGetNumber(object), name);

/* Piggy-backs on the FreeRTOS queue registry to name queue-based objects. */
#undef traceQUEUE_REGISTRY_ADD
#define traceQUEUE_REGISTRY_ADD(object, name) vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);
975 #endif\r
976 \r
977 #endif /* TRCKERNELPORT_H_ */