1 /*******************************************************************************
\r
2 * Tracealyzer v3.0.2 Recorder Library
\r
3 * Percepio AB, www.percepio.com
\r
7 * Kernel-specific functionality for FreeRTOS, used by the recorder library.
\r
10 * This software is copyright Percepio AB. The recorder library is free for
\r
11 * use together with Percepio products. You may distribute the recorder library
\r
12 * in its original form, including modifications in trcHardwarePort.c/.h
\r
13 * given that these modification are clearly marked as your own modifications
\r
14 * and documented in the initial comment section of these source files.
\r
15 * This software is the intellectual property of Percepio AB and may not be
\r
16 * sold or in other ways commercially redistributed without explicit written
\r
17 * permission by Percepio AB.
\r
20 * The trace tool and recorder library is being delivered to you AS IS and
\r
21 * Percepio AB makes no warranty as to its use or performance. Percepio AB does
\r
22 * not and cannot warrant the performance or results you may obtain by using the
\r
23 * software or documentation. Percepio AB make no warranties, express or
\r
24 * implied, as to noninfringement of third party rights, merchantability, or
\r
25 * fitness for any particular purpose. In no event will Percepio AB, its
\r
26 * technology partners, or distributors be liable to you for any consequential,
\r
27 * incidental or special damages, including any lost profits or lost savings,
\r
28 * even if a representative of Percepio AB has been advised of the possibility
\r
29 * of such damages, or for any claim by any third party. Some jurisdictions do
\r
30 * not allow the exclusion or limitation of incidental, consequential or special
\r
31 * damages, or the exclusion of implied warranties or limitations on how long an
\r
32 * implied warranty may last, so the above limitations may not apply to you.
\r
34 * Tabs are used for indent in this file (1 tab = 4 spaces)
\r
36 * Copyright Percepio AB, 2014.
\r
38 ******************************************************************************/
\r
41 #ifndef TRCKERNELPORTFREERTOS_H
\r
42 #define TRCKERNELPORTFREERTOS_H
\r
44 #include "FreeRTOS.h" /* Defines configUSE_TRACE_FACILITY */
\r
45 #include "trcHardwarePort.h"
\r
/* NOTE(review): presumably set while inside xEventGroupSetBitsFromISR() so the
   recorder can tell ISR-context event-group operations apart — confirm against
   the definition in trcKernelPort.c. */
extern int uiInEventGroupSetBitsFromISR;

/* The recorder is enabled/disabled together with FreeRTOS' own trace
   facility (configUSE_TRACE_FACILITY from FreeRTOSConfig.h). */
#define USE_TRACEALYZER_RECORDER configUSE_TRACE_FACILITY
\r
51 #if (USE_TRACEALYZER_RECORDER == 1)
\r
53 /* Defines that must be set for the recorder to work properly */
\r
54 #define TRACE_KERNEL_VERSION 0x1AA1
\r
55 #define TRACE_TICK_RATE_HZ configTICK_RATE_HZ /* Defined in "FreeRTOS.h" */
\r
56 #define TRACE_CPU_CLOCK_HZ configCPU_CLOCK_HZ /* Defined in "FreeRTOSConfig.h" */
\r
58 #if (SELECTED_PORT == PORT_ARM_CortexM)
\r
60 /* Uses CMSIS API */
\r
/* Recorder critical sections for ARM Cortex-M (M3/M4/M7), implemented with
   the CMSIS PRIMASK intrinsics: save the current PRIMASK, mask all
   configurable interrupts, and restore on exit.
   The saved-status variable was renamed from "__irq_status": identifiers
   beginning with a double underscore are reserved for the implementation
   (C11 7.1.3), so using one invites a collision with toolchain symbols. */
#define TRACE_SR_ALLOC_CRITICAL_SECTION() int trc_irq_status;
#define TRACE_ENTER_CRITICAL_SECTION() {trc_irq_status = __get_PRIMASK(); __set_PRIMASK(1);}
#define TRACE_EXIT_CRITICAL_SECTION() {__set_PRIMASK(trc_irq_status);}
\r
68 #if (SELECTED_PORT == PORT_ARM_CORTEX_M0)
\r
69 #define TRACE_SR_ALLOC_CRITICAL_SECTION() int __irq_status;
\r
70 #define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = portSET_INTERRUPT_MASK_FROM_ISR();}
\r
71 #define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
\r
74 #if ((SELECTED_PORT == PORT_ARM_CORTEX_A9) || (SELECTED_PORT == PORT_Renesas_RX600) || (SELECTED_PORT == PORT_MICROCHIP_PIC32MX) || (SELECTED_PORT == PORT_MICROCHIP_PIC32MZ))
\r
75 #define TRACE_SR_ALLOC_CRITICAL_SECTION() int __irq_status;
\r
76 #define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = portSET_INTERRUPT_MASK_FROM_ISR();}
\r
77 #define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
\r
#if (SELECTED_PORT == PORT_Win32)
/* In the Win32 port, there are no real interrupts, so we can use the normal critical sections */
/* No interrupt status needs to be saved, hence the allocation macro is
   deliberately empty. */
#define TRACE_SR_ALLOC_CRITICAL_SECTION()
#define TRACE_ENTER_CRITICAL_SECTION() portENTER_CRITICAL()
#define TRACE_EXIT_CRITICAL_SECTION() portEXIT_CRITICAL()
\r
87 #ifndef TRACE_ENTER_CRITICAL_SECTION
\r
88 #error "This port has no valid definition for critical sections! See http://percepio.com/2014/10/27/how-to-define-critical-sections-for-the-recorder/"
\r
91 #if (SELECTED_PORT == PORT_ARM_CortexM)
\r
92 #define trcCRITICAL_SECTION_BEGIN_ON_CORTEX_M_ONLY trcCRITICAL_SECTION_BEGIN
\r
93 #define trcCRITICAL_SECTION_END_ON_CORTEX_M_ONLY trcCRITICAL_SECTION_END
\r
95 #define trcCRITICAL_SECTION_BEGIN_ON_CORTEX_M_ONLY() recorder_busy++;
\r
96 #define trcCRITICAL_SECTION_END_ON_CORTEX_M_ONLY() recorder_busy--;
\r
99 /*************************************************************************/
\r
100 /* KERNEL SPECIFIC OBJECT CONFIGURATION */
\r
101 /*************************************************************************/
\r
/* Number of object classes tracked by the recorder — must equal the number
   of TRACE_CLASS_* values defined below. */
#define TRACE_NCLASSES 7
/* Object class IDs. They index the per-class sections of the Object Property
   Table and are encoded in the three LSBs of several kernel event codes
   (see the event-code comments further down in this file). */
#define TRACE_CLASS_QUEUE ((traceObjectClass)0)
#define TRACE_CLASS_SEMAPHORE ((traceObjectClass)1)
#define TRACE_CLASS_MUTEX ((traceObjectClass)2)
#define TRACE_CLASS_TASK ((traceObjectClass)3)
#define TRACE_CLASS_ISR ((traceObjectClass)4)
#define TRACE_CLASS_TIMER ((traceObjectClass)5)
#define TRACE_CLASS_EVENTGROUP ((traceObjectClass)6)

/* Total number of kernel objects the recorder can track. The N* limits are
   presumably configured in trcConfig.h (included below) — confirm there. */
#define TRACE_KERNEL_OBJECT_COUNT (NQueue + NSemaphore + NMutex + NTask + NISR + NTimer + NEventGroup)
\r
113 /* The size of the Object Property Table entries, in bytes, per object */
\r
115 /* Queue properties (except name): current number of message in queue */
\r
116 #define PropertyTableSizeQueue (NameLenQueue + 1)
\r
118 /* Semaphore properties (except name): state (signaled = 1, cleared = 0) */
\r
119 #define PropertyTableSizeSemaphore (NameLenSemaphore + 1)
\r
121 /* Mutex properties (except name): owner (task handle, 0 = free) */
\r
122 #define PropertyTableSizeMutex (NameLenMutex + 1)
\r
124 /* Task properties (except name): Byte 0: Current priority
\r
125 Byte 1: state (if already active)
\r
126 Byte 2: legacy, not used
\r
127 Byte 3: legacy, not used */
\r
128 #define PropertyTableSizeTask (NameLenTask + 4)
\r
130 /* ISR properties: Byte 0: priority
\r
131 Byte 1: state (if already active) */
\r
132 #define PropertyTableSizeISR (NameLenISR + 2)
\r
134 /* NTimer properties: Byte 0: state (unused for now) */
\r
135 #define PropertyTableSizeTimer (NameLenTimer + 1)
\r
137 /* NEventGroup properties: Byte 0-3: state (unused for now)*/
\r
138 #define PropertyTableSizeEventGroup (NameLenEventGroup + 4)
\r
/* The layout of the byte array representing the Object Property Table.
   Each StartIndex* constant is the byte offset at which that object class'
   property entries begin. The expansions are fully parenthesized so the
   constants are safe inside larger expressions; the original unparenthesized
   sums would mis-expand in contexts such as "x - StartIndexTask". */
#define StartIndexQueue 0
#define StartIndexSemaphore (StartIndexQueue + NQueue * PropertyTableSizeQueue)
#define StartIndexMutex (StartIndexSemaphore + NSemaphore * PropertyTableSizeSemaphore)
#define StartIndexTask (StartIndexMutex + NMutex * PropertyTableSizeMutex)
#define StartIndexISR (StartIndexTask + NTask * PropertyTableSizeTask)
#define StartIndexTimer (StartIndexISR + NISR * PropertyTableSizeISR)
#define StartIndexEventGroup (StartIndexTimer + NTimer * PropertyTableSizeTimer)

/* Number of bytes used by the object table */
#define TRACE_OBJECT_TABLE_SIZE (StartIndexEventGroup + NEventGroup * PropertyTableSizeEventGroup)
\r
/* Values for the FREERTOS_VERSION setting (expected from trcConfig.h —
   confirm), used further down to select version-dependent hook definitions
   and kernel field names (e.g. TRACE_SET_OBJECT_NUMBER, tick hooks). */
#define FREERTOS_VERSION_NOT_SET 0
#define FREERTOS_VERSION_7_3_OR_7_4 1
#define FREERTOS_VERSION_7_5_OR_7_6 2
#define FREERTOS_VERSION_8_0_OR_LATER 3
\r
159 #include "trcConfig.h" /* Must be first, even before trcTypes.h */
\r
160 #include "trcHardwarePort.h"
\r
161 #include "trcTypes.h"
\r
162 #include "trcKernelHooks.h"
\r
163 #include "trcBase.h"
\r
164 #include "trcKernel.h"
\r
165 #include "trcUser.h"
\r
167 #if (INCLUDE_NEW_TIME_EVENTS == 1 && configUSE_TICKLESS_IDLE != 0)
\r
168 #error "NewTime events can not be used in combination with tickless idle!"
\r
171 /* Initialization of the object property table */
\r
172 void vTraceInitObjectPropertyTable(void);
\r
174 /* Initialization of the handle mechanism, see e.g, xTraceGetObjectHandle */
\r
175 void vTraceInitObjectHandleStack(void);
\r
177 /* Returns the "Not enough handles" error message for the specified object class */
\r
178 const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);
\r
180 /*******************************************************************************
\r
181 * The event codes - should match the offline config file.
\r
183 * Some sections below are encoded to allow for constructions like:
\r
185 * vTraceStoreKernelCall(EVENTGROUP_CREATE + objectclass, ...
\r
187 * The object class ID is given by the three LSB bits, in such cases. Since each
\r
188 * object class has a separate object property table, the class ID is needed to
\r
189 * know what section in the object table to use for getting an object name from
\r
190 * an object handle.
\r
191 ******************************************************************************/
\r
193 #define NULL_EVENT (0x00) /* Ignored in the analysis*/
\r
195 /*******************************************************************************
\r
198 * Miscellaneous events.
\r
199 ******************************************************************************/
\r
200 #define EVENTGROUP_DIV (NULL_EVENT + 1) /*0x01*/
\r
201 #define DIV_XPS (EVENTGROUP_DIV + 0) /*0x01*/
\r
202 #define DIV_TASK_READY (EVENTGROUP_DIV + 1) /*0x02*/
\r
203 #define DIV_NEW_TIME (EVENTGROUP_DIV + 2) /*0x03*/
\r
205 /*******************************************************************************
\r
208 * Events for storing task-switches and interrupts. The RESUME events are
\r
209 * generated if the task/interrupt is already marked active.
\r
210 ******************************************************************************/
\r
211 #define EVENTGROUP_TS (EVENTGROUP_DIV + 3) /*0x04*/
\r
212 #define TS_ISR_BEGIN (EVENTGROUP_TS + 0) /*0x04*/
\r
213 #define TS_ISR_RESUME (EVENTGROUP_TS + 1) /*0x05*/
\r
214 #define TS_TASK_BEGIN (EVENTGROUP_TS + 2) /*0x06*/
\r
215 #define TS_TASK_RESUME (EVENTGROUP_TS + 3) /*0x07*/
\r
217 /*******************************************************************************
\r
218 * EVENTGROUP_OBJCLOSE_NAME
\r
220 * About Close Events
\r
221 * When an object is evicted from the object property table (object close), two
\r
222 * internal events are stored (EVENTGROUP_OBJCLOSE_NAME and
\r
223 * EVENTGROUP_OBJCLOSE_PROP), containing the handle-name mapping and object
\r
224 * properties valid up to this point.
\r
225 ******************************************************************************/
\r
226 #define EVENTGROUP_OBJCLOSE_NAME (EVENTGROUP_TS + 4) /*0x08*/
\r
228 /*******************************************************************************
\r
229 * EVENTGROUP_OBJCLOSE_PROP
\r
231 * The internal event carrying properties of deleted objects
\r
232 * The handle and object class of the closed object is not stored in this event,
\r
233 * but is assumed to be the same as in the preceding CLOSE event. Thus, these
\r
234 * two events must be generated from within a critical section.
\r
235 * When queues are closed, arg1 is the "state" property (i.e., number of
\r
236 * buffered messages/signals).
\r
237 * When actors are closed, arg1 is priority, arg2 is handle of the "instance
\r
238 * finish" event, and arg3 is event code of the "instance finish" event.
\r
239 * In this case, the lower three bits is the object class of the instance finish
\r
240 * handle. The lower three bits are not used (always zero) when queues are
\r
241 * closed since the queue type is given in the previous OBJCLOSE_NAME event.
\r
242 ******************************************************************************/
\r
243 #define EVENTGROUP_OBJCLOSE_PROP (EVENTGROUP_OBJCLOSE_NAME + 8) /*0x10*/
\r
245 /*******************************************************************************
\r
246 * EVENTGROUP_CREATE
\r
248 * The events in this group are used to log Kernel object creations.
\r
249 * The lower three bits in the event code gives the object class, i.e., type of
\r
250 * create operation (task, queue, semaphore, etc).
\r
251 ******************************************************************************/
\r
252 #define EVENTGROUP_CREATE_OBJ_SUCCESS (EVENTGROUP_OBJCLOSE_PROP + 8) /*0x18*/
\r
254 /*******************************************************************************
\r
257 * The events in this group are used to log Send/Give events on queues,
\r
258 * semaphores and mutexes The lower three bits in the event code gives the
\r
259 * object class, i.e., what type of object that is operated on (queue, semaphore
\r
261 ******************************************************************************/
\r
262 #define EVENTGROUP_SEND_SUCCESS (EVENTGROUP_CREATE_OBJ_SUCCESS + 8) /*0x20*/
\r
264 /*******************************************************************************
\r
265 * EVENTGROUP_RECEIVE
\r
267 * The events in this group are used to log Receive/Take events on queues,
\r
268 * semaphores and mutexes. The lower three bits in the event code gives the
\r
269 * object class, i.e., what type of object that is operated on (queue, semaphore
\r
271 ******************************************************************************/
\r
272 #define EVENTGROUP_RECEIVE_SUCCESS (EVENTGROUP_SEND_SUCCESS + 8) /*0x28*/
\r
274 /* Send/Give operations, from ISR */
\r
275 #define EVENTGROUP_SEND_FROM_ISR_SUCCESS \
\r
276 (EVENTGROUP_RECEIVE_SUCCESS + 8) /*0x30*/
\r
278 /* Receive/Take operations, from ISR */
\r
279 #define EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS \
\r
280 (EVENTGROUP_SEND_FROM_ISR_SUCCESS + 8) /*0x38*/
\r
282 /* "Failed" event type versions of above (timeout, failed allocation, etc) */
\r
283 #define EVENTGROUP_KSE_FAILED \
\r
284 (EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + 8) /*0x40*/
\r
286 /* Failed create calls - memory allocation failed */
\r
287 #define EVENTGROUP_CREATE_OBJ_FAILED (EVENTGROUP_KSE_FAILED) /*0x40*/
\r
289 /* Failed send/give - timeout! */
\r
290 #define EVENTGROUP_SEND_FAILED (EVENTGROUP_CREATE_OBJ_FAILED + 8) /*0x48*/
\r
292 /* Failed receive/take - timeout! */
\r
293 #define EVENTGROUP_RECEIVE_FAILED (EVENTGROUP_SEND_FAILED + 8) /*0x50*/
\r
295 /* Failed non-blocking send/give - queue full */
\r
296 #define EVENTGROUP_SEND_FROM_ISR_FAILED (EVENTGROUP_RECEIVE_FAILED + 8) /*0x58*/
\r
298 /* Failed non-blocking receive/take - queue empty */
\r
299 #define EVENTGROUP_RECEIVE_FROM_ISR_FAILED \
\r
300 (EVENTGROUP_SEND_FROM_ISR_FAILED + 8) /*0x60*/
\r
302 /* Events when blocking on receive/take */
\r
303 #define EVENTGROUP_RECEIVE_BLOCK \
\r
304 (EVENTGROUP_RECEIVE_FROM_ISR_FAILED + 8) /*0x68*/
\r
306 /* Events when blocking on send/give */
\r
307 #define EVENTGROUP_SEND_BLOCK (EVENTGROUP_RECEIVE_BLOCK + 8) /*0x70*/
\r
309 /* Events on queue peek (receive) */
\r
310 #define EVENTGROUP_PEEK_SUCCESS (EVENTGROUP_SEND_BLOCK + 8) /*0x78*/
\r
312 /* Events on object delete (vTaskDelete or vQueueDelete) */
\r
313 #define EVENTGROUP_DELETE_OBJ_SUCCESS (EVENTGROUP_PEEK_SUCCESS + 8) /*0x80*/
\r
315 /* Other events - object class is implied: TASK */
\r
316 #define EVENTGROUP_OTHERS (EVENTGROUP_DELETE_OBJ_SUCCESS + 8) /*0x88*/
\r
317 #define TASK_DELAY_UNTIL (EVENTGROUP_OTHERS + 0) /*0x88*/
\r
318 #define TASK_DELAY (EVENTGROUP_OTHERS + 1) /*0x89*/
\r
319 #define TASK_SUSPEND (EVENTGROUP_OTHERS + 2) /*0x8A*/
\r
320 #define TASK_RESUME (EVENTGROUP_OTHERS + 3) /*0x8B*/
\r
321 #define TASK_RESUME_FROM_ISR (EVENTGROUP_OTHERS + 4) /*0x8C*/
\r
322 #define TASK_PRIORITY_SET (EVENTGROUP_OTHERS + 5) /*0x8D*/
\r
323 #define TASK_PRIORITY_INHERIT (EVENTGROUP_OTHERS + 6) /*0x8E*/
\r
324 #define TASK_PRIORITY_DISINHERIT (EVENTGROUP_OTHERS + 7) /*0x8F*/
\r
326 #define EVENTGROUP_MISC_PLACEHOLDER (EVENTGROUP_OTHERS + 8) /*0x90*/
\r
327 #define PEND_FUNC_CALL (EVENTGROUP_MISC_PLACEHOLDER+0) /*0x90*/
\r
328 #define PEND_FUNC_CALL_FROM_ISR (EVENTGROUP_MISC_PLACEHOLDER+1) /*0x91*/
\r
329 #define PEND_FUNC_CALL_FAILED (EVENTGROUP_MISC_PLACEHOLDER+2) /*0x92*/
\r
330 #define PEND_FUNC_CALL_FROM_ISR_FAILED (EVENTGROUP_MISC_PLACEHOLDER+3) /*0x93*/
\r
331 #define MEM_MALLOC_SIZE (EVENTGROUP_MISC_PLACEHOLDER+4) /*0x94*/
\r
332 #define MEM_MALLOC_ADDR (EVENTGROUP_MISC_PLACEHOLDER+5) /*0x95*/
\r
333 #define MEM_FREE_SIZE (EVENTGROUP_MISC_PLACEHOLDER+6) /*0x96*/
\r
334 #define MEM_FREE_ADDR (EVENTGROUP_MISC_PLACEHOLDER+7) /*0x97*/
\r
337 #define EVENTGROUP_USEREVENT (EVENTGROUP_MISC_PLACEHOLDER + 8) /*0x98*/
\r
338 #define USER_EVENT (EVENTGROUP_USEREVENT + 0)
\r
340 /* Allow for 0-15 arguments (the number of args is added to event code) */
\r
341 #define USER_EVENT_LAST (EVENTGROUP_USEREVENT + 15) /*0xA7*/
\r
343 /*******************************************************************************
\r
344 * XTS Event - eXtended TimeStamp events
\r
345 * The timestamps used in the recorder are "differential timestamps" (DTS), i.e.
\r
346 * the time since the last stored event. The DTS fields are either 1 or 2 bytes
\r
347 * in the other events, depending on the bytes available in the event struct.
\r
348 * If the time since the last event (the DTS) is larger than allowed for by
\r
349 * the DTS field of the current event, an XTS event is inserted immediately
\r
350 * before the original event. The XTS event contains up to 3 additional bytes
\r
351 * of the DTS value - the higher bytes of the true DTS value. The lower 1-2
\r
352 * bytes are stored in the normal DTS field.
\r
353 * There are two types of XTS events, XTS8 and XTS16. An XTS8 event is stored
\r
354 * when there is only room for 1 byte (8 bit) DTS data in the original event,
\r
355 * which means a limit of 0xFF (255). The XTS16 is used when the original event
\r
356 * has a 16 bit DTS field and thereby can handle values up to 0xFFFF (65535).
\r
358 * Using a very high frequency time base can result in many XTS events.
\r
359 * Preferably, the time between two OS ticks should fit in 16 bits, i.e.,
\r
360 * at most 65535. If your time base has a higher frequency, you can define
\r
362 ******************************************************************************/
\r
364 #define EVENTGROUP_SYS (EVENTGROUP_USEREVENT + 16) /*0xA8*/
\r
365 #define XTS8 (EVENTGROUP_SYS + 0) /*0xA8*/
\r
366 #define XTS16 (EVENTGROUP_SYS + 1) /*0xA9*/
\r
367 #define EVENT_BEING_WRITTEN (EVENTGROUP_SYS + 2) /*0xAA*/
\r
368 #define RESERVED_DUMMY_CODE (EVENTGROUP_SYS + 3) /*0xAB*/
\r
369 #define LOW_POWER_BEGIN (EVENTGROUP_SYS + 4) /*0xAC*/
\r
370 #define LOW_POWER_END (EVENTGROUP_SYS + 5) /*0xAD*/
\r
371 #define XID (EVENTGROUP_SYS + 6) /*0xAE*/
\r
372 #define XTS16L (EVENTGROUP_SYS + 7) /*0xAF*/
\r
374 #define EVENTGROUP_TIMER (EVENTGROUP_SYS + 8) /*0xB0*/
\r
375 #define TIMER_CREATE (EVENTGROUP_TIMER + 0) /*0xB0*/
\r
376 #define TIMER_START (EVENTGROUP_TIMER + 1) /*0xB1*/
\r
377 #define TIMER_RST (EVENTGROUP_TIMER + 2) /*0xB2*/
\r
378 #define TIMER_STOP (EVENTGROUP_TIMER + 3) /*0xB3*/
\r
379 #define TIMER_CHANGE_PERIOD (EVENTGROUP_TIMER + 4) /*0xB4*/
\r
380 #define TIMER_DELETE (EVENTGROUP_TIMER + 5) /*0xB5*/
\r
381 #define TIMER_START_FROM_ISR (EVENTGROUP_TIMER + 6) /*0xB6*/
\r
382 #define TIMER_RESET_FROM_ISR (EVENTGROUP_TIMER + 7) /*0xB7*/
\r
383 #define TIMER_STOP_FROM_ISR (EVENTGROUP_TIMER + 8) /*0xB8*/
\r
385 #define TIMER_CREATE_FAILED (EVENTGROUP_TIMER + 9) /*0xB9*/
\r
386 #define TIMER_START_FAILED (EVENTGROUP_TIMER + 10) /*0xBA*/
\r
387 #define TIMER_RESET_FAILED (EVENTGROUP_TIMER + 11) /*0xBB*/
\r
388 #define TIMER_STOP_FAILED (EVENTGROUP_TIMER + 12) /*0xBC*/
\r
389 #define TIMER_CHANGE_PERIOD_FAILED (EVENTGROUP_TIMER + 13) /*0xBD*/
\r
390 #define TIMER_DELETE_FAILED (EVENTGROUP_TIMER + 14) /*0xBE*/
\r
391 #define TIMER_START_FROM_ISR_FAILED (EVENTGROUP_TIMER + 15) /*0xBF*/
\r
392 #define TIMER_RESET_FROM_ISR_FAILED (EVENTGROUP_TIMER + 16) /*0xC0*/
\r
393 #define TIMER_STOP_FROM_ISR_FAILED (EVENTGROUP_TIMER + 17) /*0xC1*/
\r
395 #define EVENTGROUP_EG (EVENTGROUP_TIMER + 18) /*0xC2*/
\r
396 #define EVENT_GROUP_CREATE (EVENTGROUP_EG + 0) /*0xC2*/
\r
397 #define EVENT_GROUP_CREATE_FAILED (EVENTGROUP_EG + 1) /*0xC3*/
\r
398 #define EVENT_GROUP_SYNC_BLOCK (EVENTGROUP_EG + 2) /*0xC4*/
\r
399 #define EVENT_GROUP_SYNC_END (EVENTGROUP_EG + 3) /*0xC5*/
\r
400 #define EVENT_GROUP_WAIT_BITS_BLOCK (EVENTGROUP_EG + 4) /*0xC6*/
\r
401 #define EVENT_GROUP_WAIT_BITS_END (EVENTGROUP_EG + 5) /*0xC7*/
\r
402 #define EVENT_GROUP_CLEAR_BITS (EVENTGROUP_EG + 6) /*0xC8*/
\r
403 #define EVENT_GROUP_CLEAR_BITS_FROM_ISR (EVENTGROUP_EG + 7) /*0xC9*/
\r
404 #define EVENT_GROUP_SET_BITS (EVENTGROUP_EG + 8) /*0xCA*/
\r
405 #define EVENT_GROUP_DELETE (EVENTGROUP_EG + 9) /*0xCB*/
\r
406 #define EVENT_GROUP_SYNC_END_FAILED (EVENTGROUP_EG + 10) /*0xCC*/
\r
407 #define EVENT_GROUP_WAIT_BITS_END_FAILED (EVENTGROUP_EG + 11) /*0xCD*/
\r
408 #define EVENT_GROUP_SET_BITS_FROM_ISR (EVENTGROUP_EG + 12) /*0xCE*/
\r
409 #define EVENT_GROUP_SET_BITS_FROM_ISR_FAILED (EVENTGROUP_EG + 13) /*0xCF*/
\r
411 #define TASK_INSTANCE_FINISHED_NEXT_KSE (EVENTGROUP_EG + 14) /*0xD0*/
\r
412 #define TASK_INSTANCE_FINISHED_DIRECT (EVENTGROUP_EG + 15) /*0xD1*/
\r
414 #define TRACE_TASK_NOTIFY_GROUP (EVENTGROUP_EG + 16) /*0xD2*/
\r
415 #define TRACE_TASK_NOTIFY (TRACE_TASK_NOTIFY_GROUP + 0) /*0xD2*/
\r
416 #define TRACE_TASK_NOTIFY_TAKE (TRACE_TASK_NOTIFY_GROUP + 1) /*0xD3*/
\r
417 #define TRACE_TASK_NOTIFY_TAKE_BLOCK (TRACE_TASK_NOTIFY_GROUP + 2) /*0xD4*/
\r
418 #define TRACE_TASK_NOTIFY_TAKE_FAILED (TRACE_TASK_NOTIFY_GROUP + 3) /*0xD5*/
\r
419 #define TRACE_TASK_NOTIFY_WAIT (TRACE_TASK_NOTIFY_GROUP + 4) /*0xD6*/
\r
420 #define TRACE_TASK_NOTIFY_WAIT_BLOCK (TRACE_TASK_NOTIFY_GROUP + 5) /*0xD7*/
\r
421 #define TRACE_TASK_NOTIFY_WAIT_FAILED (TRACE_TASK_NOTIFY_GROUP + 6) /*0xD8*/
\r
422 #define TRACE_TASK_NOTIFY_FROM_ISR (TRACE_TASK_NOTIFY_GROUP + 7) /*0xD9*/
\r
423 #define TRACE_TASK_NOTIFY_GIVE_FROM_ISR (TRACE_TASK_NOTIFY_GROUP + 8) /*0xDA*/
\r
425 /************************************************************************/
\r
426 /* KERNEL SPECIFIC DATA AND FUNCTIONS NEEDED TO PROVIDE THE */
\r
427 /* FUNCTIONALITY REQUESTED BY THE TRACE RECORDER */
\r
428 /************************************************************************/
\r
430 /******************************************************************************
\r
431 * TraceObjectClassTable
\r
432 * Translates a FreeRTOS QueueType into trace objects classes (TRACE_CLASS_).
\r
433 * This was added since we want to map both types of Mutex and both types of
\r
434 * Semaphores on common classes for all Mutexes and all Semaphores respectively.
\r
436 * FreeRTOS Queue types
\r
437 * #define queueQUEUE_TYPE_BASE (0U) => TRACE_CLASS_QUEUE
\r
438 * #define queueQUEUE_TYPE_MUTEX (1U) => TRACE_CLASS_MUTEX
\r
439 * #define queueQUEUE_TYPE_COUNTING_SEMAPHORE (2U) => TRACE_CLASS_SEMAPHORE
\r
440 * #define queueQUEUE_TYPE_BINARY_SEMAPHORE (3U) => TRACE_CLASS_SEMAPHORE
\r
441 * #define queueQUEUE_TYPE_RECURSIVE_MUTEX (4U) => TRACE_CLASS_MUTEX
\r
442 ******************************************************************************/
\r
444 extern traceObjectClass TraceObjectClassTable[5];
\r
446 /* These functions are implemented in the .c file since certain header files
\r
447 must not be included in this one */
\r
448 objectHandleType prvTraceGetObjectNumber(void* handle);
\r
449 unsigned char prvTraceGetObjectType(void* handle);
\r
450 objectHandleType prvTraceGetTaskNumber(void* handle);
\r
451 unsigned char prvTraceIsSchedulerActive(void);
\r
452 unsigned char prvTraceIsSchedulerSuspended(void);
\r
453 unsigned char prvTraceIsSchedulerStarted(void);
\r
454 void* prvTraceGetCurrentTaskHandle(void);
\r
456 #if (configUSE_TIMERS == 1)
\r
457 #undef INCLUDE_xTimerGetTimerDaemonTaskHandle
\r
458 #define INCLUDE_xTimerGetTimerDaemonTaskHandle 1
\r
461 /************************************************************************/
\r
462 /* KERNEL SPECIFIC MACROS USED BY THE TRACE RECORDER */
\r
463 /************************************************************************/
\r
/* Thin wrappers mapping the recorder's generic operations onto FreeRTOS
   (pvPortMalloc) and the kernel-port helper functions declared above. */
#define TRACE_MALLOC(size) pvPortMalloc(size)
#define TRACE_IS_SCHEDULER_ACTIVE() prvTraceIsSchedulerActive()
#define TRACE_IS_SCHEDULER_STARTED() prvTraceIsSchedulerStarted()
#define TRACE_IS_SCHEDULER_SUSPENDED() prvTraceIsSchedulerSuspended()
#define TRACE_GET_CURRENT_TASK() prvTraceGetCurrentTaskHandle()
\r
/* Accessors for FreeRTOS task control block (TCB) fields used by the
   recorder. Macro arguments are parenthesized so any pointer expression
   (e.g. "&tcb" or "p + 1") expands correctly; the original bare "pxTCB->"
   expansion bound "->" tighter than the caller's operators. */
#define TRACE_GET_TASK_PRIORITY(pxTCB) ((uint8_t)(pxTCB)->uxPriority)
#define TRACE_GET_TASK_NAME(pxTCB) ((char*)(pxTCB)->pcTaskName)
#define TRACE_GET_TASK_NUMBER(pxTCB) (prvTraceGetTaskNumber(pxTCB))
#define TRACE_SET_TASK_NUMBER(pxTCB) (pxTCB)->uxTaskNumber = xTraceGetObjectHandle(TRACE_CLASS_TASK);
\r
/* Translate a FreeRTOS queue-type code (kernelClass) into a recorder trace
   class via TraceObjectClassTable (see the table comment earlier in this
   file). The CLASS argument is unused in this expansion. */
#define TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass) TraceObjectClassTable[kernelClass]
/* Same lookup, but derives the kernel class from the object itself. */
#define TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject) TRACE_GET_CLASS_TRACE_CLASS(CLASS, prvTraceGetObjectType(pxObject))
\r
/* Accessors for FreeRTOS software-timer fields. Macro arguments are
   parenthesized so pointer expressions (e.g. "&tmr") expand correctly;
   the original bare "pxTimer->" / "(Timer_t*)tmr" expansions did not. */
#define TRACE_GET_TIMER_NUMBER(tmr) ( ( objectHandleType ) ((Timer_t*)(tmr))->uxTimerNumber )
#define TRACE_SET_TIMER_NUMBER(tmr) ((Timer_t*)(tmr))->uxTimerNumber = xTraceGetObjectHandle(TRACE_CLASS_TIMER);
#define TRACE_GET_TIMER_NAME(pxTimer) (pxTimer)->pcTimerName
#define TRACE_GET_TIMER_PERIOD(pxTimer) (pxTimer)->xTimerPeriodInTicks
\r
/* Accessors for FreeRTOS event-group bookkeeping. The "eg" argument of the
   setter is parenthesized so pointer expressions expand correctly inside
   the (EventGroup_t*) cast. */
#define TRACE_GET_EVENTGROUP_NUMBER(eg) ( ( objectHandleType ) uxEventGroupGetNumber(eg) )
#define TRACE_SET_EVENTGROUP_NUMBER(eg) ((EventGroup_t*)(eg))->uxEventGroupNumber = xTraceGetObjectHandle(TRACE_CLASS_EVENTGROUP);
\r
487 #define TRACE_GET_OBJECT_NUMBER(CLASS, pxObject) (prvTraceGetObjectNumber(pxObject))
\r
489 #if (FREERTOS_VERSION < FREERTOS_VERSION_8_0_OR_LATER)
\r
490 #define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) pxObject->ucQueueNumber = xTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject));
\r
492 #define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) pxObject->uxQueueNumber = xTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject));
\r
495 #define TRACE_GET_CLASS_EVENT_CODE(SERVICE, RESULT, CLASS, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass))
\r
496 #define TRACE_GET_OBJECT_EVENT_CODE(SERVICE, RESULT, CLASS, pxObject) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject))
\r
497 #define TRACE_GET_TASK_EVENT_CODE(SERVICE, RESULT, CLASS, pxTCB) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_CLASS_TASK)
\r
499 /************************************************************************/
\r
500 /* KERNEL SPECIFIC WRAPPERS THAT SHOULD BE CALLED BY THE KERNEL */
\r
501 /************************************************************************/
\r
503 #if (configUSE_TICKLESS_IDLE != 0)
\r
505 #undef traceLOW_POWER_IDLE_BEGIN
\r
506 #define traceLOW_POWER_IDLE_BEGIN() \
\r
508 extern uint32_t trace_disable_timestamp; \
\r
509 vTraceStoreLowPower(0); \
\r
510 trace_disable_timestamp = 1; \
\r
513 #undef traceLOW_POWER_IDLE_END
\r
514 #define traceLOW_POWER_IDLE_END() \
\r
516 extern uint32_t trace_disable_timestamp; \
\r
517 trace_disable_timestamp = 0; \
\r
518 vTraceStoreLowPower(1); \
\r
523 /* A macro that will update the tick count when returning from tickless idle */
\r
524 #undef traceINCREASE_TICK_COUNT
\r
525 /* Note: This can handle time adjustments of max 2^32 ticks, i.e., 35 seconds at 120 MHz. Thus, tick-less idle periods longer than 2^32 ticks will appear "compressed" on the time line.*/
\r
526 #define traceINCREASE_TICK_COUNT( xCount ) { DWT_CYCLES_ADDED += (xCount * (TRACE_CPU_CLOCK_HZ / TRACE_TICK_RATE_HZ)); }
\r
528 /* Called for each task that becomes ready */
\r
529 #undef traceMOVED_TASK_TO_READY_STATE
\r
530 #define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \
\r
531 trcKERNEL_HOOKS_MOVED_TASK_TO_READY_STATE(pxTCB);
\r
533 /* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. */
\r
534 #undef traceTASK_INCREMENT_TICK
\r
536 #if (FREERTOS_VERSION == FREERTOS_VERSION_7_3_OR_7_4)
\r
538 #define traceTASK_INCREMENT_TICK( xTickCount ) \
\r
539 if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxMissedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
\r
540 if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); }
\r
544 #define traceTASK_INCREMENT_TICK( xTickCount ) \
\r
545 if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxPendedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
\r
546 if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); }
\r
550 /* Called on each task-switch */
\r
551 #undef traceTASK_SWITCHED_IN
\r
552 #define traceTASK_SWITCHED_IN() \
\r
553 trcKERNEL_HOOKS_TASK_SWITCH(TRACE_GET_CURRENT_TASK());
\r
555 /* Called on vTaskSuspend */
\r
556 #undef traceTASK_SUSPEND
\r
557 #define traceTASK_SUSPEND( pxTaskToSuspend ) \
\r
558 trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend);
\r
560 /* Called from special case with timer only */
\r
561 #undef traceTASK_DELAY_SUSPEND
\r
562 #define traceTASK_DELAY_SUSPEND( pxTaskToSuspend ) \
\r
563 trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend); \
\r
564 trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();
\r
566 /* Called on vTaskDelay - note the use of FreeRTOS variable xTicksToDelay */
\r
567 #undef traceTASK_DELAY
\r
568 #define traceTASK_DELAY() \
\r
569 trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY, pxCurrentTCB, xTicksToDelay); \
\r
570 trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();
\r
572 /* Called on vTaskDelayUntil - note the use of FreeRTOS variable xTimeToWake */
\r
573 #undef traceTASK_DELAY_UNTIL
\r
574 #define traceTASK_DELAY_UNTIL() \
\r
575 trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY_UNTIL, pxCurrentTCB, xTimeToWake); \
\r
576 trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();
\r
578 #if (INCLUDE_OBJECT_DELETE == 1)
\r
579 /* Called on vTaskDelete */
\r
580 #undef traceTASK_DELETE
\r
581 #define traceTASK_DELETE( pxTaskToDelete ) \
\r
582 { TRACE_SR_ALLOC_CRITICAL_SECTION(); \
\r
583 TRACE_ENTER_CRITICAL_SECTION(); \
\r
584 trcKERNEL_HOOKS_TASK_DELETE(DELETE_OBJ, pxTaskToDelete); \
\r
585 TRACE_EXIT_CRITICAL_SECTION(); }
\r
588 #if (INCLUDE_OBJECT_DELETE == 1)
\r
589 /* Called on vQueueDelete */
\r
590 #undef traceQUEUE_DELETE
\r
591 #define traceQUEUE_DELETE( pxQueue ) \
\r
592 { TRACE_SR_ALLOC_CRITICAL_SECTION(); \
\r
593 TRACE_ENTER_CRITICAL_SECTION(); \
\r
594 trcKERNEL_HOOKS_OBJECT_DELETE(DELETE_OBJ, UNUSED, pxQueue); \
\r
595 TRACE_EXIT_CRITICAL_SECTION(); }
\r
598 /* Called on vTaskCreate */
\r
599 #undef traceTASK_CREATE
\r
600 #define traceTASK_CREATE(pxNewTCB) \
\r
601 if (pxNewTCB != NULL) \
\r
603 trcKERNEL_HOOKS_TASK_CREATE(CREATE_OBJ, UNUSED, pxNewTCB); \
\r
606 /* Called in vTaskCreate, if it fails (typically if the stack can not be allocated) */
\r
607 #undef traceTASK_CREATE_FAILED
\r
608 #define traceTASK_CREATE_FAILED() \
\r
609 trcKERNEL_HOOKS_TASK_CREATE_FAILED(CREATE_OBJ, UNUSED);
\r
/* Called in xQueueCreate, and thereby for all other object based on queues,
such as semaphores. Registers the new object in the trace. */
#undef traceQUEUE_CREATE
#define traceQUEUE_CREATE( pxNewQueue )\
trcKERNEL_HOOKS_OBJECT_CREATE(CREATE_OBJ, UNUSED, pxNewQueue);

/* Called in xQueueCreate, if the queue creation fails. Only the requested
queue type can be logged, since no object exists. */
#undef traceQUEUE_CREATE_FAILED
#define traceQUEUE_CREATE_FAILED( queueType ) \
trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE_OBJ, UNUSED, queueType);

/* Called in xQueueCreateMutex, and thereby also from xSemaphoreCreateMutex
and xSemaphoreCreateRecursiveMutex (mutexes are queue-based in FreeRTOS). */
#undef traceCREATE_MUTEX
#define traceCREATE_MUTEX( pxNewQueue ) \
trcKERNEL_HOOKS_OBJECT_CREATE(CREATE_OBJ, UNUSED, pxNewQueue);

/* Called in xQueueCreateMutex when the operation fails (when memory
allocation fails); the mutex queue type is hard-coded since no object exists. */
#undef traceCREATE_MUTEX_FAILED
#define traceCREATE_MUTEX_FAILED() \
trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE_OBJ, UNUSED, queueQUEUE_TYPE_MUTEX);

/* Called when the recursive mutex can not be given, since the caller is not
the holder; logged as a failed send on the mutex object. */
#undef traceGIVE_MUTEX_RECURSIVE_FAILED
#define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ) \
trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxMutex);
\r
/* Called when a message is sent to a queue (also covers give operations on
semaphores and mutexes, which are queue-based). Logs the event, then records
the object's new state: 0 for a mutex (now free), otherwise the resulting
message count. */
#undef traceQUEUE_SEND
#define traceQUEUE_SEND( pxQueue ) \
trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, SUCCESS, UNUSED, pxQueue); \
trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? (uint8_t)0 : (uint8_t)(pxQueue->uxMessagesWaiting + 1));

/* Called when a message failed to be sent to a queue (timeout) */
#undef traceQUEUE_SEND_FAILED
#define traceQUEUE_SEND_FAILED( pxQueue ) \
trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxQueue);

/* Called when the task is blocked due to a send operation on a full queue */
#undef traceBLOCKING_ON_QUEUE_SEND
#define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \
trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, BLOCK, UNUSED, pxQueue);

/* Called when a message is received from a queue. Logs the event, then
records the new state: for a mutex the new holder's task number, otherwise
the remaining message count. */
#undef traceQUEUE_RECEIVE
#define traceQUEUE_RECEIVE( pxQueue ) \
trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, SUCCESS, UNUSED, pxQueue); \
trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? TRACE_GET_TASK_NUMBER(TRACE_GET_CURRENT_TASK()) : (uint8_t)(pxQueue->uxMessagesWaiting - 1));

/* Called when a receive operation on a queue fails (timeout) */
#undef traceQUEUE_RECEIVE_FAILED
#define traceQUEUE_RECEIVE_FAILED( pxQueue ) \
trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, FAILED, UNUSED, pxQueue);

/* Called when the task is blocked due to a receive operation on an empty
queue. For non-mutex objects this also ends the current task instance, so
the blocking receive marks an instance boundary in the trace. */
#undef traceBLOCKING_ON_QUEUE_RECEIVE
#define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \
trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, BLOCK, UNUSED, pxQueue); \
if (TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) != TRACE_CLASS_MUTEX) \
{trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();}

/* Called on xQueuePeek; the message stays in the queue, so no state update. */
#undef traceQUEUE_PEEK
#define traceQUEUE_PEEK( pxQueue ) \
trcKERNEL_HOOKS_KERNEL_SERVICE(PEEK, SUCCESS, UNUSED, pxQueue);
\r
/* Called when a message is sent from interrupt context, e.g., using
xQueueSendFromISR. The stored state is unconditionally the new message
count (no mutex special case in the ISR variants, unlike traceQUEUE_SEND). */
#undef traceQUEUE_SEND_FROM_ISR
#define traceQUEUE_SEND_FROM_ISR( pxQueue ) \
trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, SUCCESS, UNUSED, pxQueue); \
trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting + 1));

/* Called when a message send from interrupt context fails (since the queue was full) */
#undef traceQUEUE_SEND_FROM_ISR_FAILED
#define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \
trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, FAILED, UNUSED, pxQueue);

/* Called when a message is received in interrupt context, e.g., using
xQueueReceiveFromISR. Stores the remaining message count as the new state. */
#undef traceQUEUE_RECEIVE_FROM_ISR
#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \
trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, SUCCESS, UNUSED, pxQueue); \
trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting - 1));

/* Called when a message receive from interrupt context fails (since the queue was empty) */
#undef traceQUEUE_RECEIVE_FROM_ISR_FAILED
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \
trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, FAILED, UNUSED, pxQueue);
\r
/* Called in vTaskPrioritySet; logs the task and its new priority. */
#undef traceTASK_PRIORITY_SET
#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \
trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_SET, pxTask, uxNewPriority);

/* Called in vTaskPriorityInherit, which is called by Mutex operations;
logged as a distinct event so inherited priorities can be told apart from
explicit priority changes. */
#undef traceTASK_PRIORITY_INHERIT
#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \
trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_INHERIT, pxTask, uxNewPriority);

/* Called in vTaskPriorityDisinherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_DISINHERIT
#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \
trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_DISINHERIT, pxTask, uxNewPriority);

/* Called in vTaskResume */
#undef traceTASK_RESUME
#define traceTASK_RESUME( pxTaskToResume ) \
trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME, pxTaskToResume);

/* Called in the resume-from-ISR path; separate event code from TASK_RESUME. */
#undef traceTASK_RESUME_FROM_ISR
#define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \
trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME_FROM_ISR, pxTaskToResume);
\r
723 #if (FREERTOS_VERSION >= FREERTOS_VERSION_8_0_OR_LATER)
\r
#if (INCLUDE_MEMMANG_EVENTS == 1)

/* Implemented in the recorder library; logs a heap event with address and
signed size (positive for malloc, negative for free). */
extern void vTraceStoreMemMangEvent(uint32_t ecode, uint32_t address, int32_t size);

/* Heap allocation hook; NULL results are not logged.
NOTE(review): the extracted text lost the #undef lines and the closing
#endif of this conditional; restored here. */
#undef traceMALLOC
#define traceMALLOC( pvAddress, uiSize ) {if (pvAddress != 0) vTraceStoreMemMangEvent(MEM_MALLOC_SIZE, ( uint32_t ) pvAddress, (int32_t)uiSize); }

/* Heap free hook; the size is negated so frees show as negative deltas. */
#undef traceFREE
#define traceFREE( pvAddress, uiSize ) {vTraceStoreMemMangEvent(MEM_FREE_SIZE, ( uint32_t ) pvAddress, (int32_t)(-uiSize)); }

#endif /* INCLUDE_MEMMANG_EVENTS */
\r
/* Called in timer.c - xTimerCreate. Registers the new software timer in the trace. */
#undef traceTIMER_CREATE
#define traceTIMER_CREATE(tmr) \
trcKERNEL_HOOKS_TIMER_CREATE(TIMER_CREATE, tmr);

/* Called if xTimerCreate fails; no timer handle exists, hence the 0 argument. */
#undef traceTIMER_CREATE_FAILED
#define traceTIMER_CREATE_FAILED() \
trcKERNEL_HOOKS_TIMER_EVENT(TIMER_CREATE_FAILED, 0);
\r
/* Called when a command is sent to the timer task. Change-period commands log
the new period as parameter; successful deletes release the timer's trace
handle; all other traced commands map to an event code derived from the
command ID and the pass/fail result.
Note that xCommandID can never be tmrCOMMAND_EXECUTE_CALLBACK (-1) since the
trace macro is not called in that case.
NOTE(review): the extracted text lost the closing brace line of the outer
if-block; restored here. */
#undef traceTIMER_COMMAND_SEND
#define traceTIMER_COMMAND_SEND(tmr, xCommandID, xOptionalValue, xReturn) \
if (xCommandID > tmrCOMMAND_START_DONT_TRACE){\
	if (xCommandID == tmrCOMMAND_CHANGE_PERIOD) vTraceStoreKernelCallWithParam((xReturn == pdPASS) ? TIMER_CHANGE_PERIOD : TIMER_CHANGE_PERIOD_FAILED, TRACE_CLASS_TIMER, TRACE_GET_TIMER_NUMBER(tmr), xOptionalValue);\
	else if ((xCommandID == tmrCOMMAND_DELETE) && (xReturn == pdPASS)){ trcKERNEL_HOOKS_TIMER_DELETE(TIMER_DELETE, tmr); } \
	else {trcKERNEL_HOOKS_TIMER_EVENT(EVENTGROUP_TIMER + xCommandID + ((xReturn == pdPASS)?0:(TIMER_CREATE_FAILED - TIMER_CREATE)), tmr); }\
}
\r
/* Called when a function call is pended to the timer daemon task; logs
success or failure of queuing the request, attributed to the daemon task.
NOTE(review): the extracted text lost the "else \" continuation line, which
as extracted would log BOTH events on success; restored so the failure
variant is logged only when ret != pdPASS. */
#undef tracePEND_FUNC_CALL
#define tracePEND_FUNC_CALL(func, arg1, arg2, ret) \
if (ret == pdPASS) \
	vTraceStoreKernelCall(PEND_FUNC_CALL, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle()) ); \
else \
	vTraceStoreKernelCall(PEND_FUNC_CALL_FAILED, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle()) );
\r
/* ISR variant of the pend-function-call hook. The event is suppressed when
uiInEventGroupSetBitsFromISR is set (by traceEVENT_GROUP_SET_BITS_FROM_ISR),
so a set-bits-from-ISR is not double-logged via its internal pend; the flag
is always cleared afterwards. */
#undef tracePEND_FUNC_CALL_FROM_ISR
#define tracePEND_FUNC_CALL_FROM_ISR(func, arg1, arg2, ret) \
if (! uiInEventGroupSetBitsFromISR) vTraceStoreKernelCall(PEND_FUNC_CALL_FROM_ISR, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle()) ); \
uiInEventGroupSetBitsFromISR = 0;
\r
/* Event group creation hook: assigns a trace handle to the new event group,
then logs the create event. Two statements — not safe directly under an
unbraced if at the call site (kept as in the original). */
#undef traceEVENT_GROUP_CREATE
#define traceEVENT_GROUP_CREATE(eg) \
TRACE_SET_EVENTGROUP_NUMBER(eg); \
vTraceStoreKernelCall(EVENT_GROUP_CREATE, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg));

/* Event group delete hook: logs the delete, saves the object's name and
properties for the trace (the object is about to disappear), then frees the
trace handle for reuse. */
#undef traceEVENT_GROUP_DELETE
#define traceEVENT_GROUP_DELETE(eg) \
vTraceStoreKernelCall(EVENT_GROUP_DELETE, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg)); \
vTraceStoreObjectNameOnCloseEvent(TRACE_GET_EVENTGROUP_NUMBER(eg), TRACE_CLASS_EVENTGROUP); \
vTraceStoreObjectPropertiesOnCloseEvent(TRACE_GET_EVENTGROUP_NUMBER(eg), TRACE_CLASS_EVENTGROUP); \
vTraceFreeObjectHandle(TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg));

/* Event group creation failure; no handle exists, hence 0. */
#undef traceEVENT_GROUP_CREATE_FAILED
#define traceEVENT_GROUP_CREATE_FAILED() \
vTraceStoreKernelCall(EVENT_GROUP_CREATE_FAILED, TRACE_CLASS_EVENTGROUP, 0);

/* Sync (rendezvous) blocked; logs the bits being waited for. */
#undef traceEVENT_GROUP_SYNC_BLOCK
#define traceEVENT_GROUP_SYNC_BLOCK(eg, bitsToSet, bitsToWaitFor) \
vTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_BLOCK, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor);

/* Sync completed; wasTimeout selects the failed or successful event code. */
#undef traceEVENT_GROUP_SYNC_END
#define traceEVENT_GROUP_SYNC_END(eg, bitsToSet, bitsToWaitFor, wasTimeout) \
if (wasTimeout){ vTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_END_FAILED, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor);} \
else{ vTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_END, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); }

/* Wait-bits blocked; also ends the current task instance, so the blocking
wait marks an instance boundary in the trace. */
#undef traceEVENT_GROUP_WAIT_BITS_BLOCK
#define traceEVENT_GROUP_WAIT_BITS_BLOCK(eg, bitsToWaitFor) \
vTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_BLOCK, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); \
trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

/* Wait-bits completed; wasTimeout selects the failed or successful code. */
#undef traceEVENT_GROUP_WAIT_BITS_END
#define traceEVENT_GROUP_WAIT_BITS_END(eg, bitsToWaitFor, wasTimeout) \
if (wasTimeout){ vTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_END_FAILED, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); } \
else{ vTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_END, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); }

/* Clear-bits hook; only logged when there are actually bits to clear. */
#undef traceEVENT_GROUP_CLEAR_BITS
#define traceEVENT_GROUP_CLEAR_BITS(eg, bitsToClear) \
if (bitsToClear) vTraceStoreKernelCallWithParam(EVENT_GROUP_CLEAR_BITS, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToClear);

/* ISR variant of clear-bits. */
#undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(eg, bitsToClear) \
if (bitsToClear) vTraceStoreKernelCallWithParam(EVENT_GROUP_CLEAR_BITS_FROM_ISR, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToClear);

/* Set-bits hook. */
#undef traceEVENT_GROUP_SET_BITS
#define traceEVENT_GROUP_SET_BITS(eg, bitsToSet) \
vTraceStoreKernelCallWithParam(EVENT_GROUP_SET_BITS, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToSet);

/* ISR variant of set-bits; raises a flag so the pend-function-call this
triggers is not logged a second time (see tracePEND_FUNC_CALL_FROM_ISR). */
#undef traceEVENT_GROUP_SET_BITS_FROM_ISR
#define traceEVENT_GROUP_SET_BITS_FROM_ISR(eg, bitsToSet) \
vTraceStoreKernelCallWithParam(EVENT_GROUP_SET_BITS_FROM_ISR, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToSet); \
uiInEventGroupSetBitsFromISR = 1;
\r
/* Called on ulTaskNotifyTake; logs success when the current task was
notified, failure otherwise, with the wait timeout as parameter.
NOTE(review): the extracted text lost the "else \" continuation line, which
as extracted would log BOTH events when notified; restored. */
#undef traceTASK_NOTIFY_TAKE
#define traceTASK_NOTIFY_TAKE() \
	if (pxCurrentTCB->eNotifyState == eNotified) \
		vTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_TAKE, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	else \
		vTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_TAKE_FAILED, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait);

/* Called when ulTaskNotifyTake blocks; also ends the current task instance. */
#undef traceTASK_NOTIFY_TAKE_BLOCK
#define traceTASK_NOTIFY_TAKE_BLOCK() \
	vTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_TAKE_BLOCK, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();
\r
/* Called on xTaskNotifyWait; logs success when the current task was
notified, failure otherwise, with the wait timeout as parameter.
NOTE(review): the extracted text lost the "else \" continuation line, which
as extracted would log BOTH events when notified; restored. */
#undef traceTASK_NOTIFY_WAIT
#define traceTASK_NOTIFY_WAIT() \
	if (pxCurrentTCB->eNotifyState == eNotified) \
		vTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	else \
		vTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_FAILED, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait);

/* Called when xTaskNotifyWait blocks; also ends the current task instance. */
#undef traceTASK_NOTIFY_WAIT_BLOCK
#define traceTASK_NOTIFY_WAIT_BLOCK() \
	vTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_BLOCK, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();
\r
/* Notify hook; logs against the task referenced by xTaskToNotify, which is
in scope at the kernel call site. */
#undef traceTASK_NOTIFY
#define traceTASK_NOTIFY() \
vTraceStoreKernelCall(TRACE_TASK_NOTIFY, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTaskToNotify));

/* ISR variant of the notify hook. */
#undef traceTASK_NOTIFY_FROM_ISR
#define traceTASK_NOTIFY_FROM_ISR() \
vTraceStoreKernelCall(TRACE_TASK_NOTIFY_FROM_ISR, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTaskToNotify));

/* ISR variant of the notify-give hook. */
#undef traceTASK_NOTIFY_GIVE_FROM_ISR
#define traceTASK_NOTIFY_GIVE_FROM_ISR() \
vTraceStoreKernelCall(TRACE_TASK_NOTIFY_GIVE_FROM_ISR, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTaskToNotify));
\r
/************************************************************************/
/* KERNEL SPECIFIC MACROS TO EXCLUDE OR INCLUDE THINGS IN TRACE */
/************************************************************************/

/* Returns the exclude state of the object */
uint8_t uiTraceIsObjectExcluded(traceObjectClass objectclass, objectHandleType handle);

/* The per-object exclusion flags all live in one bit array (excludedObjects),
partitioned by object class. Each class region starts after the preceding
classes' capacities (NQueue, NSemaphore, NMutex, NTask, NTimer), each +1.
The index expressions below encode that layout and must stay in sync. */
#define TRACE_SET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, queueIndex)
#define TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, queueIndex)
#define TRACE_GET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, queueIndex)

/* Semaphores: offset by the queue region. */
#define TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)
#define TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)
#define TRACE_GET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)

/* Mutexes: offset by the queue and semaphore regions. */
#define TRACE_SET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)
#define TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)
#define TRACE_GET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)

/* Tasks: offset by the queue, semaphore and mutex regions. */
#define TRACE_SET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)
#define TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)
#define TRACE_GET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)

/* Timers: offset by all of the above. */
#define TRACE_SET_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+timerIndex)
#define TRACE_CLEAR_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+timerIndex)
#define TRACE_GET_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+timerIndex)

/* Event groups: last region in the array. */
#define TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+NTimer+1+egIndex)
#define TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+NTimer+1+egIndex)
#define TRACE_GET_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+NTimer+1+egIndex)
\r
/* Clears the exclusion flag for one object, dispatching on its trace class
to pick the correct region of the excludedObjects bit array.
NOTE(review): the extracted text lost the switch braces and the break
continuation lines; restored so the macro expands to a valid switch and
cases do not fall through into each other. */
#define TRACE_CLEAR_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
switch (objectclass) \
{ \
case TRACE_CLASS_QUEUE: \
	TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_SEMAPHORE: \
	TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_MUTEX: \
	TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_TASK: \
	TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_TIMER: \
	TRACE_CLEAR_TIMER_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_EVENTGROUP: \
	TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(handle); \
	break; \
}
\r
/* Sets the exclusion flag for one object; mirror of
TRACE_CLEAR_OBJECT_FLAG_ISEXCLUDED.
NOTE(review): the extracted text lost the switch braces and the break
continuation lines; restored so the macro expands to a valid switch and
cases do not fall through into each other. */
#define TRACE_SET_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
switch (objectclass) \
{ \
case TRACE_CLASS_QUEUE: \
	TRACE_SET_QUEUE_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_SEMAPHORE: \
	TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_MUTEX: \
	TRACE_SET_MUTEX_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_TASK: \
	TRACE_SET_TASK_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_TIMER: \
	TRACE_SET_TIMER_FLAG_ISEXCLUDED(handle); \
	break; \
case TRACE_CLASS_EVENTGROUP: \
	TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(handle); \
	break; \
}
\r
/* Excludes the given task from the trace, addressed by its task number. */
#define vTraceExcludeTaskFromTrace(handle) \
TRACE_SET_TASK_FLAG_ISEXCLUDED(TRACE_GET_TASK_NUMBER(handle));

/* Re-includes the given task in the trace. */
#define vTraceIncludeTaskInTrace(handle) \
TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(TRACE_GET_TASK_NUMBER(handle));

/* Excludes the given queue from the trace, addressed by its object number. */
#define vTraceExcludeQueueFromTrace(handle) \
TRACE_SET_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

/* Re-includes the given queue in the trace. */
#define vTraceIncludeQueueInTrace(handle) \
TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));
\r
/* Excludes the given semaphore from the trace. */
#define vTraceExcludeSemaphoreFromTrace(handle) \
TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

/* Re-includes the given semaphore in the trace.
BUG FIX(review): the original cleared the QUEUE flag, which addresses a
different region of excludedObjects than the SEMAPHORE flag set above, so a
semaphore could never be re-included (and a queue's flag could be corrupted). */
#define vTraceIncludeSemaphoreInTrace(handle) \
TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));
\r
/* Excludes the given mutex from the trace. */
#define vTraceExcludeMutexFromTrace(handle) \
TRACE_SET_MUTEX_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

/* Re-includes the given mutex in the trace.
BUG FIX(review): the original cleared the QUEUE flag, which addresses a
different region of excludedObjects than the MUTEX flag set above, so a
mutex could never be re-included (and a queue's flag could be corrupted). */
#define vTraceIncludeMutexInTrace(handle) \
TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));
\r
/* Excludes the given software timer from the trace. */
#define vTraceExcludeTimerFromTrace(handle) \
TRACE_SET_TIMER_FLAG_ISEXCLUDED(TRACE_GET_TIMER_NUMBER(handle));

/* Re-includes the given software timer in the trace.
BUG FIX(review): the original cleared the QUEUE flag while passing a TIMER
index, addressing the wrong region of excludedObjects; a timer could never
be re-included (and a queue's flag could be corrupted). */
#define vTraceIncludeTimerInTrace(handle) \
TRACE_CLEAR_TIMER_FLAG_ISEXCLUDED(TRACE_GET_TIMER_NUMBER(handle));
\r
/* Excludes the given event group from the trace. */
#define vTraceExcludeEventGroupFromTrace(handle) \
TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(TRACE_GET_EVENTGROUP_NUMBER(handle));

/* Re-includes the given event group in the trace. */
#define vTraceIncludeEventGroupInTrace(handle) \
TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(TRACE_GET_EVENTGROUP_NUMBER(handle));
\r
/* Kernel Services */
/* Excludes vTaskDelay/vTaskDelayUntil events from the trace. */
#define vTraceExcludeKernelServiceDelayFromTrace() \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY_UNTIL);

/* Re-includes the delay events in the trace. */
#define vTraceIncludeKernelServiceDelayInTrace() \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY_UNTIL);

/* HELPER MACROS FOR KERNEL SERVICES FOR OBJECTS */
/* The event code for a send/receive on an object class is the event-group
base code plus the class, so each helper flags all five variants (success,
block, failed, from-ISR success, from-ISR failed) for one class at once. */
#define vTraceExcludeKernelServiceSendFromTrace_HELPER(class) \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_SUCCESS + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_BLOCK + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FAILED + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_SUCCESS + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_FAILED + class);

#define vTraceIncludeKernelServiceSendInTrace_HELPER(class) \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_SUCCESS + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_BLOCK + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FAILED + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_SUCCESS + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_FAILED + class);

#define vTraceExcludeKernelServiceReceiveFromTrace_HELPER(class) \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_SUCCESS + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_BLOCK + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FAILED + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + class); \
TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + class);

#define vTraceIncludeKernelServiceReceiveInTrace_HELPER(class) \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_SUCCESS + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_BLOCK + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FAILED + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + class); \
TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + class);

/* EXCLUDE AND INCLUDE FOR QUEUE */
#define vTraceExcludeKernelServiceQueueSendFromTrace() \
vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_QUEUE);

#define vTraceIncludeKernelServiceQueueSendInTrace() \
vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_QUEUE);

#define vTraceExcludeKernelServiceQueueReceiveFromTrace() \
vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_QUEUE);

#define vTraceIncludeKernelServiceQueueReceiveInTrace() \
vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_QUEUE);
\r
/* EXCLUDE AND INCLUDE FOR SEMAPHORE */
#define vTraceExcludeKernelServiceSemaphoreSendFromTrace() \
vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_SEMAPHORE);

/* NOTE(review): this public macro name misspells "Service" ("Servic").
Kept as-is for backward compatibility with existing callers; a correctly
spelled alias is provided below. */
#define vTraceIncludeKernelServicSemaphoreSendInTrace() \
vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_SEMAPHORE);

/* Correctly spelled alias of vTraceIncludeKernelServicSemaphoreSendInTrace. */
#define vTraceIncludeKernelServiceSemaphoreSendInTrace() \
vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_SEMAPHORE);

#define vTraceExcludeKernelServiceSemaphoreReceiveFromTrace() \
vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_SEMAPHORE);

#define vTraceIncludeKernelServiceSemaphoreReceiveInTrace() \
vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_SEMAPHORE);
\r
/* EXCLUDE AND INCLUDE FOR MUTEX */
/* Excludes mutex give (send) events from the trace. */
#define vTraceExcludeKernelServiceMutexSendFromTrace() \
vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_MUTEX);

/* Re-includes mutex give (send) events in the trace. */
#define vTraceIncludeKernelServiceMutexSendInTrace() \
vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_MUTEX);

/* Excludes mutex take (receive) events from the trace. */
#define vTraceExcludeKernelServiceMutexReceiveFromTrace() \
vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_MUTEX);

/* Re-includes mutex take (receive) events in the trace. */
#define vTraceIncludeKernelServiceMutexReceiveInTrace() \
vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_MUTEX);
\r
/************************************************************************/
/* KERNEL SPECIFIC MACROS TO NAME OBJECTS, IF NECESSARY */
/************************************************************************/
/* Assigns a display name to a queue, resolved via its trace class and
object number. */
#define vTraceSetQueueName(object, name) \
vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);

/* Assigns a display name to a semaphore (same queue-based lookup). */
#define vTraceSetSemaphoreName(object, name) \
vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);

/* Assigns a display name to a mutex (same queue-based lookup). */
#define vTraceSetMutexName(object, name) \
vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);

/* Assigns a display name to an event group; event groups are not
queue-based, so the number comes from uxEventGroupGetNumber. */
#define vTraceSetEventGroupName(object, name) \
vTraceSetObjectName(TRACE_CLASS_EVENTGROUP, (objectHandleType)uxEventGroupGetNumber(object), name);

/* Hook into the FreeRTOS queue registry so names registered there are also
applied to the trace. */
#undef traceQUEUE_REGISTRY_ADD
#define traceQUEUE_REGISTRY_ADD(object, name) vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);
\r
1076 #endif /* TRCKERNELPORTFREERTOS_H_ */
\r