1 /*******************************************************************************
\r
2 * Trace Recorder Library for Tracealyzer v3.1.2
\r
3 * Percepio AB, www.percepio.com
\r
6 * This file is part of the trace recorder library (RECORDER), which is the
\r
7 * intellectual property of Percepio AB (PERCEPIO) and provided under a
\r
8 * license as follows.
\r
9 * The RECORDER may be used free of charge for the purpose of recording data
\r
10 * intended for analysis in PERCEPIO products. It may not be used or modified
\r
11 * for other purposes without explicit permission from PERCEPIO.
\r
12 * You may distribute the RECORDER in its original source code form, assuming
\r
13 * this text (terms of use, disclaimer, copyright notice) is unchanged. You are
\r
14 * allowed to distribute the RECORDER with minor modifications intended for
\r
15 * configuration or porting of the RECORDER, e.g., to allow using it on a
\r
16 * specific processor, processor family or with a specific communication
\r
17 * interface. Any such modifications should be documented directly below
\r
18 * this comment block.
\r
21 * The RECORDER is being delivered to you AS IS and PERCEPIO makes no warranty
\r
22 * as to its use or performance. PERCEPIO does not and cannot warrant the
\r
23 * performance or results you may obtain by using the RECORDER or documentation.
\r
24 * PERCEPIO make no warranties, express or implied, as to noninfringement of
\r
25 * third party rights, merchantability, or fitness for any particular purpose.
\r
26 * In no event will PERCEPIO, its technology partners, or distributors be liable
\r
27 * to you for any consequential, incidental or special damages, including any
\r
28 * lost profits or lost savings, even if a representative of PERCEPIO has been
\r
29 * advised of the possibility of such damages, or for any claim by any third
\r
30 * party. Some jurisdictions do not allow the exclusion or limitation of
\r
31 * incidental, consequential or special damages, or the exclusion of implied
\r
32 * warranties or limitations on how long an implied warranty may last, so the
\r
33 * above limitations may not apply to you.
\r
35 * FreeRTOS-specific definitions needed by the trace recorder
\r
39 * Tabs are used for indent in this file (1 tab = 4 spaces)
\r
41 * Copyright Percepio AB, 2017.
\r
43 ******************************************************************************/
\r
45 #ifndef TRC_KERNEL_PORT_H
\r
46 #define TRC_KERNEL_PORT_H
\r
48 #include "FreeRTOS.h" /* Defines configUSE_TRACE_FACILITY */
\r
49 #include "trcPortDefines.h"
\r
55 #define TRC_USE_TRACEALYZER_RECORDER configUSE_TRACE_FACILITY
\r
/*** FreeRTOS version codes **************************************************/
/* Values for the TRC_CFG_FREERTOS_VERSION setting, used below to select
 * version-specific kernel field names and trace hook signatures. */
#define FREERTOS_VERSION_NOT_SET 0
#define TRC_FREERTOS_VERSION_7_3_OR_7_4 1
#define TRC_FREERTOS_VERSION_7_5_OR_7_6 2
#define TRC_FREERTOS_VERSION_8_X 3
#define TRC_FREERTOS_VERSION_9_X 4
\r
/* Everything below is compiled only when the recorder is enabled
 * (TRC_USE_TRACEALYZER_RECORDER follows configUSE_TRACE_FACILITY). */
#if (TRC_USE_TRACEALYZER_RECORDER == 1)

/*******************************************************************************
 * INCLUDE_xTaskGetCurrentTaskHandle must be set to 1 for tracing to work properly
 ******************************************************************************/
/* Forced on: the recorder relies on xTaskGetCurrentTaskHandle() (see
 * TRACE_GET_CURRENT_TASK below). */
#undef INCLUDE_xTaskGetCurrentTaskHandle
#define INCLUDE_xTaskGetCurrentTaskHandle 1
\r
/*******************************************************************************
 * vTraceSetQueueName(void* object, const char* name)
 *
 * Parameter object: pointer to the Queue that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for Queue objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetQueueName(void* object, const char* name);

/*******************************************************************************
 * vTraceSetSemaphoreName(void* object, const char* name)
 *
 * Parameter object: pointer to the Semaphore that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for Semaphore objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetSemaphoreName(void* object, const char* name);

/*******************************************************************************
 * vTraceSetMutexName(void* object, const char* name)
 *
 * Parameter object: pointer to the Mutex that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for Mutex objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetMutexName(void* object, const char* name);

/*******************************************************************************
 * Note: Setting names for event groups is difficult to support, this has been
 * excluded intentionally. This since we don't know if event_groups.c is
 * included in the build, so referencing it from the recorder may cause errors.
 ******************************************************************************/

/* Gives the currently executing task (wrapper for RTOS-specific function) */
void* prvTraceGetCurrentTaskHandle(void);
\r
/* Needed when ISR tracing is on (snapshot) or in streaming mode. */
#if (((TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT) && (TRC_CFG_INCLUDE_ISR_TRACING == 1)) || (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING))

/* Tells if the scheduler currently is suspended (task-switches can't occur) */
unsigned char prvTraceIsSchedulerSuspended(void);

/*******************************************************************************
 * INCLUDE_xTaskGetSchedulerState must be set to 1 for tracing to work properly
 ******************************************************************************/
/* Forced on: prvTraceIsSchedulerSuspended presumably queries the scheduler
 * state via this API - TODO confirm against trcKernelPort.c. */
#undef INCLUDE_xTaskGetSchedulerState
#define INCLUDE_xTaskGetSchedulerState 1

#endif /* (((TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT) && (TRC_CFG_INCLUDE_ISR_TRACING == 1)) || (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING)) */
\r
/* Kernel/port identification code reported in the trace data. */
#define TRACE_KERNEL_VERSION 0x1AA1
#define TRACE_TICK_RATE_HZ configTICK_RATE_HZ /* Defined in "FreeRTOS.h" */
#define TRACE_CPU_CLOCK_HZ configCPU_CLOCK_HZ /* Defined in "FreeRTOSConfig.h" */
#define TRACE_GET_CURRENT_TASK() prvTraceGetCurrentTaskHandle()

#define TRACE_GET_OS_TICKS() (uiTraceTickCount) /* Streaming only */

/* If using dynamic allocation of snapshot trace buffer... */
#define TRACE_MALLOC(size) pvPortMalloc(size)

/* When software timers are in use, make the timer daemon task handle
 * available so the recorder can identify the timer service task. */
#ifdef configUSE_TIMERS
#if (configUSE_TIMERS == 1)
#undef INCLUDE_xTimerGetTimerDaemonTaskHandle
#define INCLUDE_xTimerGetTimerDaemonTaskHandle 1
#endif /* configUSE_TIMERS == 1*/
#endif /* configUSE_TIMERS */
\r
/*******************************************************************************
 * Recorder critical sections.
 * Each supported hardware port defines TRACE_ALLOC_CRITICAL_SECTION (declares
 * the variable holding the saved interrupt state), TRACE_ENTER_CRITICAL_SECTION
 * and TRACE_EXIT_CRITICAL_SECTION. Exactly one port block is expected to match;
 * the trailing #ifndef catches ports with no definition.
 * (Restored #endif/#else lines that were missing from this copy of the file.)
 ******************************************************************************/

/* For ARM Cortex-M devices - assumes the ARM CMSIS API is available */
#if (defined (__CORTEX_M))
	#define TRACE_ALLOC_CRITICAL_SECTION() uint32_t __irq_status;
	#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = __get_PRIMASK(); __set_PRIMASK(1);} /* PRIMASK disables ALL interrupts - allows for tracing in any ISR */
	#define TRACE_EXIT_CRITICAL_SECTION() {__set_PRIMASK(__irq_status);}
#endif

#if ((TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_ARM_CORTEX_A9) || (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_Renesas_RX600) || (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_MICROCHIP_PIC24_PIC32))
	#define TRACE_ALLOC_CRITICAL_SECTION() int __irq_status;
	#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = portSET_INTERRUPT_MASK_FROM_ISR();}
	#define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
#endif

#if (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_Win32)
	/* In the Win32 port, there are no real interrupts, so we can use the normal critical sections */
	#define TRACE_ALLOC_CRITICAL_SECTION()
	#define TRACE_ENTER_CRITICAL_SECTION() portENTER_CRITICAL()
	#define TRACE_EXIT_CRITICAL_SECTION() portEXIT_CRITICAL()
#endif

#if (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_POWERPC_Z4)
	#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X)
		/* FreeRTOS v8.0 or later */
		#define TRACE_ALLOC_CRITICAL_SECTION() UBaseType_t __irq_status;
		#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = portSET_INTERRUPT_MASK_FROM_ISR();}
		#define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
	#else
		/* FreeRTOS v7.x */
		#define TRACE_ALLOC_CRITICAL_SECTION() unsigned portBASE_TYPE __irq_status;
		#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = portSET_INTERRUPT_MASK_FROM_ISR();}
		#define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
	#endif
#endif

#ifndef TRACE_ENTER_CRITICAL_SECTION
	#error "This hardware port has no definition for critical sections! See http://percepio.com/2014/10/27/how-to-define-critical-sections-for-the-recorder/"
#endif
\r
/******************************************************************************/
/*** Definitions for Snapshot mode ********************************************/
/******************************************************************************/
#if (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT)

/*** The object classes *******************************************************/
/* The trace object classes. Used as indices into the object property table
 * below, and as the low bits of many class-indexed event codes further down. */
#define TRACE_NCLASSES 7
#define TRACE_CLASS_QUEUE ((traceObjectClass)0)
#define TRACE_CLASS_SEMAPHORE ((traceObjectClass)1)
#define TRACE_CLASS_MUTEX ((traceObjectClass)2)
#define TRACE_CLASS_TASK ((traceObjectClass)3)
#define TRACE_CLASS_ISR ((traceObjectClass)4)
#define TRACE_CLASS_TIMER ((traceObjectClass)5)
#define TRACE_CLASS_EVENTGROUP ((traceObjectClass)6)
\r
/*** Definitions for Object Table ********************************************/
/* Layout of the Object Property Table: one fixed-size record per kernel
 * object (name + a few property bytes), grouped by object class in the order
 * Queue, Semaphore, Mutex, Task, ISR, Timer, EventGroup. The TRC_CFG_N* and
 * TRC_CFG_NAME_LEN_* limits come from the recorder configuration. */
#define TRACE_KERNEL_OBJECT_COUNT (TRC_CFG_NQUEUE + TRC_CFG_NSEMAPHORE + TRC_CFG_NMUTEX + TRC_CFG_NTASK + TRC_CFG_NISR + TRC_CFG_NTIMER + TRC_CFG_NEVENTGROUP)

/* Queue properties (except name): current number of message in queue */
#define PropertyTableSizeQueue (TRC_CFG_NAME_LEN_QUEUE + 1)

/* Semaphore properties (except name): state (signaled = 1, cleared = 0) */
#define PropertyTableSizeSemaphore (TRC_CFG_NAME_LEN_SEMAPHORE + 1)

/* Mutex properties (except name): owner (task handle, 0 = free) */
#define PropertyTableSizeMutex (TRC_CFG_NAME_LEN_MUTEX + 1)

/* Task properties (except name): Byte 0: Current priority
                                  Byte 1: state (if already active)
                                  Byte 2: legacy, not used
                                  Byte 3: legacy, not used */
#define PropertyTableSizeTask (TRC_CFG_NAME_LEN_TASK + 4)

/* ISR properties:                Byte 0: priority
                                  Byte 1: state (if already active) */
#define PropertyTableSizeISR (TRC_CFG_NAME_LEN_ISR + 2)

/* TRC_CFG_NTIMER properties:     Byte 0: state (unused for now) */
#define PropertyTableSizeTimer (TRC_CFG_NAME_LEN_TIMER + 1)

/* TRC_CFG_NEVENTGROUP properties: Byte 0-3: state (unused for now)*/
#define PropertyTableSizeEventGroup (TRC_CFG_NAME_LEN_EVENTGROUP + 4)

/* The layout of the byte array representing the Object Property Table.
 * Expansions are parenthesized so these macros are safe to use inside any
 * arithmetic expression (the original forms were unparenthesized sums). */
#define StartIndexQueue 0
#define StartIndexSemaphore (StartIndexQueue + TRC_CFG_NQUEUE * PropertyTableSizeQueue)
#define StartIndexMutex (StartIndexSemaphore + TRC_CFG_NSEMAPHORE * PropertyTableSizeSemaphore)
#define StartIndexTask (StartIndexMutex + TRC_CFG_NMUTEX * PropertyTableSizeMutex)
#define StartIndexISR (StartIndexTask + TRC_CFG_NTASK * PropertyTableSizeTask)
#define StartIndexTimer (StartIndexISR + TRC_CFG_NISR * PropertyTableSizeISR)
#define StartIndexEventGroup (StartIndexTimer + TRC_CFG_NTIMER * PropertyTableSizeTimer)

/* Number of bytes used by the object table */
#define TRACE_OBJECT_TABLE_SIZE (StartIndexEventGroup + TRC_CFG_NEVENTGROUP * PropertyTableSizeEventGroup)
\r
235 ///*** FreeRTOS version codes **************************************************/
\r
236 //#define FREERTOS_VERSION_NOT_SET 0
\r
237 //#define TRC_FREERTOS_VERSION_7_3_OR_7_4 1
\r
238 //#define TRC_FREERTOS_VERSION_7_5_OR_7_6 2
\r
239 //#define TRC_FREERTOS_VERSION_8_X 3
\r
240 //#define TRC_FREERTOS_VERSION_9_X 4
\r
243 /* Flag to tell the context of tracePEND_FUNC_CALL_FROM_ISR */
\r
244 extern int uiInEventGroupSetBitsFromISR;
\r
246 /* Initialization of the object property table */
\r
247 void vTraceInitObjectPropertyTable(void);
\r
249 /* Initialization of the handle mechanism, see e.g, prvTraceGetObjectHandle */
\r
250 void vTraceInitObjectHandleStack(void);
\r
252 /* Returns the "Not enough handles" error message for the specified object class */
\r
253 const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);
\r
255 traceHandle prvTraceGetObjectNumber(void* handle);
\r
257 uint8_t prvTraceGetObjectType(void* handle);
\r
259 traceHandle prvTraceGetTaskNumber(void* handle);
\r
261 void* prvTraceGetCurrentTaskHandle(void);
\r
263 uint8_t uiTraceIsObjectExcluded(traceObjectClass objectclass, traceHandle handle);
\r
265 #if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X)
\r
266 traceHandle prvTraceGetEventGroupNumber(void* handle);
\r
269 /******************************************************************************
\r
270 * TraceObjectClassTable
\r
271 * Translates a FreeRTOS QueueType into trace objects classes (TRACE_CLASS_).
\r
272 * Has one entry for each QueueType, gives TRACE_CLASS ID.
\r
273 ******************************************************************************/
\r
274 extern traceObjectClass TraceObjectClassTable[5];
\r
/*** Event codes for snapshot mode - must match Tracealyzer config files ******/
/* NOTE: these numeric values are decoded by the Tracealyzer host application;
 * do not renumber. Class-indexed event groups are spaced 8 apart so the low
 * three bits can carry the object class (see TRACE_CLASS_* above). */

#define NULL_EVENT (0x00UL)

/*******************************************************************************
 * EVENTGROUP_DIV
 *
 * Miscellaneous events.
 ******************************************************************************/
#define EVENTGROUP_DIV (NULL_EVENT + 1UL) /*0x01*/
#define DIV_XPS (EVENTGROUP_DIV + 0UL) /*0x01*/
#define DIV_TASK_READY (EVENTGROUP_DIV + 1UL) /*0x02*/
#define DIV_NEW_TIME (EVENTGROUP_DIV + 2UL) /*0x03*/

/*******************************************************************************
 * EVENTGROUP_TS
 *
 * Events for storing task-switches and interrupts. The RESUME events are
 * generated if the task/interrupt is already marked active.
 ******************************************************************************/
#define EVENTGROUP_TS (EVENTGROUP_DIV + 3UL) /*0x04*/
#define TS_ISR_BEGIN (EVENTGROUP_TS + 0UL) /*0x04*/
#define TS_ISR_RESUME (EVENTGROUP_TS + 1UL) /*0x05*/
#define TS_TASK_BEGIN (EVENTGROUP_TS + 2UL) /*0x06*/
#define TS_TASK_RESUME (EVENTGROUP_TS + 3UL) /*0x07*/

/*******************************************************************************
 * EVENTGROUP_OBJCLOSE_NAME
 *
 * About Close Events
 * When an object is evicted from the object property table (object close), two
 * internal events are stored (EVENTGROUP_OBJCLOSE_NAME and
 * EVENTGROUP_OBJCLOSE_PROP), containing the handle-name mapping and object
 * properties valid up to this point.
 ******************************************************************************/
#define EVENTGROUP_OBJCLOSE_NAME (EVENTGROUP_TS + 4UL) /*0x08*/

/*******************************************************************************
 * EVENTGROUP_OBJCLOSE_PROP
 *
 * The internal event carrying properties of deleted objects
 * The handle and object class of the closed object is not stored in this event,
 * but is assumed to be the same as in the preceding CLOSE event. Thus, these
 * two events must be generated from within a critical section.
 * When queues are closed, arg1 is the "state" property (i.e., number of
 * buffered messages/signals).
 * When actors are closed, arg1 is priority, arg2 is handle of the "instance
 * finish" event, and arg3 is event code of the "instance finish" event.
 * In this case, the lower three bits is the object class of the instance finish
 * handle. The lower three bits are not used (always zero) when queues are
 * closed since the queue type is given in the previous OBJCLOSE_NAME event.
 ******************************************************************************/
#define EVENTGROUP_OBJCLOSE_PROP (EVENTGROUP_OBJCLOSE_NAME + 8UL) /*0x10*/

/*******************************************************************************
 * EVENTGROUP_CREATE
 *
 * The events in this group are used to log Kernel object creations.
 * The lower three bits in the event code gives the object class, i.e., type of
 * create operation (task, queue, semaphore, etc).
 ******************************************************************************/
#define EVENTGROUP_CREATE_OBJ_SUCCESS (EVENTGROUP_OBJCLOSE_PROP + 8UL) /*0x18*/

/*******************************************************************************
 * EVENTGROUP_SEND
 *
 * The events in this group are used to log Send/Give events on queues,
 * semaphores and mutexes The lower three bits in the event code gives the
 * object class, i.e., what type of object that is operated on (queue, semaphore
 * or mutex).
 ******************************************************************************/
#define EVENTGROUP_SEND_SUCCESS (EVENTGROUP_CREATE_OBJ_SUCCESS + 8UL) /*0x20*/
\r
/*******************************************************************************
 * EVENTGROUP_RECEIVE
 *
 * The events in this group are used to log Receive/Take events on queues,
 * semaphores and mutexes. The lower three bits in the event code gives the
 * object class, i.e., what type of object that is operated on (queue, semaphore
 * or mutex).
 ******************************************************************************/
#define EVENTGROUP_RECEIVE_SUCCESS (EVENTGROUP_SEND_SUCCESS + 8UL) /*0x28*/

/* Send/Give operations, from ISR */
#define EVENTGROUP_SEND_FROM_ISR_SUCCESS \
	(EVENTGROUP_RECEIVE_SUCCESS + 8UL) /*0x30*/

/* Receive/Take operations, from ISR */
#define EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS \
	(EVENTGROUP_SEND_FROM_ISR_SUCCESS + 8UL) /*0x38*/

/* "Failed" event type versions of above (timeout, failed allocation, etc) */
#define EVENTGROUP_KSE_FAILED \
	(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + 8UL) /*0x40*/

/* Failed create calls - memory allocation failed */
#define EVENTGROUP_CREATE_OBJ_FAILED (EVENTGROUP_KSE_FAILED) /*0x40*/

/* Failed send/give - timeout! */
#define EVENTGROUP_SEND_FAILED (EVENTGROUP_CREATE_OBJ_FAILED + 8UL) /*0x48*/

/* Failed receive/take - timeout! */
#define EVENTGROUP_RECEIVE_FAILED (EVENTGROUP_SEND_FAILED + 8UL) /*0x50*/

/* Failed non-blocking send/give - queue full */
#define EVENTGROUP_SEND_FROM_ISR_FAILED (EVENTGROUP_RECEIVE_FAILED + 8UL) /*0x58*/

/* Failed non-blocking receive/take - queue empty */
#define EVENTGROUP_RECEIVE_FROM_ISR_FAILED \
	(EVENTGROUP_SEND_FROM_ISR_FAILED + 8UL) /*0x60*/

/* Events when blocking on receive/take */
#define EVENTGROUP_RECEIVE_BLOCK \
	(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + 8UL) /*0x68*/

/* Events when blocking on send/give */
#define EVENTGROUP_SEND_BLOCK (EVENTGROUP_RECEIVE_BLOCK + 8UL) /*0x70*/

/* Events on queue peek (receive) */
#define EVENTGROUP_PEEK_SUCCESS (EVENTGROUP_SEND_BLOCK + 8UL) /*0x78*/

/* Events on object delete (vTaskDelete or vQueueDelete) */
#define EVENTGROUP_DELETE_OBJ_SUCCESS (EVENTGROUP_PEEK_SUCCESS + 8UL) /*0x80*/

/* Other events - object class is implied: TASK */
#define EVENTGROUP_OTHERS (EVENTGROUP_DELETE_OBJ_SUCCESS + 8UL) /*0x88*/
#define TASK_DELAY_UNTIL (EVENTGROUP_OTHERS + 0UL) /*0x88*/
#define TASK_DELAY (EVENTGROUP_OTHERS + 1UL) /*0x89*/
#define TASK_SUSPEND (EVENTGROUP_OTHERS + 2UL) /*0x8A*/
#define TASK_RESUME (EVENTGROUP_OTHERS + 3UL) /*0x8B*/
#define TASK_RESUME_FROM_ISR (EVENTGROUP_OTHERS + 4UL) /*0x8C*/
#define TASK_PRIORITY_SET (EVENTGROUP_OTHERS + 5UL) /*0x8D*/
#define TASK_PRIORITY_INHERIT (EVENTGROUP_OTHERS + 6UL) /*0x8E*/
#define TASK_PRIORITY_DISINHERIT (EVENTGROUP_OTHERS + 7UL) /*0x8F*/

/* Pend-function-call and heap (malloc/free) events */
#define EVENTGROUP_MISC_PLACEHOLDER (EVENTGROUP_OTHERS + 8UL) /*0x90*/
#define PEND_FUNC_CALL (EVENTGROUP_MISC_PLACEHOLDER+0UL) /*0x90*/
#define PEND_FUNC_CALL_FROM_ISR (EVENTGROUP_MISC_PLACEHOLDER+1UL) /*0x91*/
#define PEND_FUNC_CALL_FAILED (EVENTGROUP_MISC_PLACEHOLDER+2UL) /*0x92*/
#define PEND_FUNC_CALL_FROM_ISR_FAILED (EVENTGROUP_MISC_PLACEHOLDER+3UL) /*0x93*/
#define MEM_MALLOC_SIZE (EVENTGROUP_MISC_PLACEHOLDER+4UL) /*0x94*/
#define MEM_MALLOC_ADDR (EVENTGROUP_MISC_PLACEHOLDER+5UL) /*0x95*/
#define MEM_FREE_SIZE (EVENTGROUP_MISC_PLACEHOLDER+6UL) /*0x96*/
#define MEM_FREE_ADDR (EVENTGROUP_MISC_PLACEHOLDER+7UL) /*0x97*/

/* User events (vTracePrint etc.) */
#define EVENTGROUP_USEREVENT (EVENTGROUP_MISC_PLACEHOLDER + 8UL) /*0x98*/
#define USER_EVENT (EVENTGROUP_USEREVENT + 0UL)

/* Allow for 0-15 arguments (the number of args is added to event code) */
#define USER_EVENT_LAST (EVENTGROUP_USEREVENT + 15UL) /*0xA7*/
\r
/*******************************************************************************
 * XTS Event - eXtended TimeStamp events
 * The timestamps used in the recorder are "differential timestamps" (DTS), i.e.
 * the time since the last stored event. The DTS fields are either 1 or 2 bytes
 * in the other events, depending on the bytes available in the event struct.
 * If the time since the last event (the DTS) is larger than allowed for by
 * the DTS field of the current event, an XTS event is inserted immediately
 * before the original event. The XTS event contains up to 3 additional bytes
 * of the DTS value - the higher bytes of the true DTS value. The lower 1-2
 * bytes are stored in the normal DTS field.
 * There are two types of XTS events, XTS8 and XTS16. An XTS8 event is stored
 * when there is only room for 1 byte (8 bit) DTS data in the original event,
 * which means a limit of 0xFF (255UL). The XTS16 is used when the original event
 * has a 16 bit DTS field and thereby can handle values up to 0xFFFF (65535UL).
 *
 * Using a very high frequency time base can result in many XTS events.
 * Preferably, the time between two OS ticks should fit in 16 bits, i.e.,
 * at most 65535. If your time base has a higher frequency, you can define
 * a prescaler in the recorder configuration.
 * NOTE(review): the tail of this comment was truncated in this copy of the
 * file - verify the exact wording against the original trcKernelPort.h.
 ******************************************************************************/
#define EVENTGROUP_SYS (EVENTGROUP_USEREVENT + 16UL) /*0xA8*/
#define XTS8 (EVENTGROUP_SYS + 0UL) /*0xA8*/
#define XTS16 (EVENTGROUP_SYS + 1UL) /*0xA9*/
#define EVENT_BEING_WRITTEN (EVENTGROUP_SYS + 2UL) /*0xAA*/
#define RESERVED_DUMMY_CODE (EVENTGROUP_SYS + 3UL) /*0xAB*/
#define LOW_POWER_BEGIN (EVENTGROUP_SYS + 4UL) /*0xAC*/
#define LOW_POWER_END (EVENTGROUP_SYS + 5UL) /*0xAD*/
#define XID (EVENTGROUP_SYS + 6UL) /*0xAE*/
#define XTS16L (EVENTGROUP_SYS + 7UL) /*0xAF*/
\r
/* Software timer events */
#define EVENTGROUP_TIMER (EVENTGROUP_SYS + 8UL) /*0xB0*/
#define TIMER_CREATE (EVENTGROUP_TIMER + 0UL) /*0xB0*/
#define TIMER_START (EVENTGROUP_TIMER + 1UL) /*0xB1*/
#define TIMER_RST (EVENTGROUP_TIMER + 2UL) /*0xB2*/
#define TIMER_STOP (EVENTGROUP_TIMER + 3UL) /*0xB3*/
#define TIMER_CHANGE_PERIOD (EVENTGROUP_TIMER + 4UL) /*0xB4*/
#define TIMER_DELETE (EVENTGROUP_TIMER + 5UL) /*0xB5*/
#define TIMER_START_FROM_ISR (EVENTGROUP_TIMER + 6UL) /*0xB6*/
#define TIMER_RESET_FROM_ISR (EVENTGROUP_TIMER + 7UL) /*0xB7*/
#define TIMER_STOP_FROM_ISR (EVENTGROUP_TIMER + 8UL) /*0xB8*/

#define TIMER_CREATE_FAILED (EVENTGROUP_TIMER + 9UL) /*0xB9*/
#define TIMER_START_FAILED (EVENTGROUP_TIMER + 10UL) /*0xBA*/
#define TIMER_RESET_FAILED (EVENTGROUP_TIMER + 11UL) /*0xBB*/
#define TIMER_STOP_FAILED (EVENTGROUP_TIMER + 12UL) /*0xBC*/
#define TIMER_CHANGE_PERIOD_FAILED (EVENTGROUP_TIMER + 13UL) /*0xBD*/
#define TIMER_DELETE_FAILED (EVENTGROUP_TIMER + 14UL) /*0xBE*/
#define TIMER_START_FROM_ISR_FAILED (EVENTGROUP_TIMER + 15UL) /*0xBF*/
#define TIMER_RESET_FROM_ISR_FAILED (EVENTGROUP_TIMER + 16UL) /*0xC0*/
#define TIMER_STOP_FROM_ISR_FAILED (EVENTGROUP_TIMER + 17UL) /*0xC1*/

/* Event group events */
#define EVENTGROUP_EG (EVENTGROUP_TIMER + 18UL) /*0xC2*/
#define EVENT_GROUP_CREATE (EVENTGROUP_EG + 0UL) /*0xC2*/
#define EVENT_GROUP_CREATE_FAILED (EVENTGROUP_EG + 1UL) /*0xC3*/
#define EVENT_GROUP_SYNC_BLOCK (EVENTGROUP_EG + 2UL) /*0xC4*/
#define EVENT_GROUP_SYNC_END (EVENTGROUP_EG + 3UL) /*0xC5*/
#define EVENT_GROUP_WAIT_BITS_BLOCK (EVENTGROUP_EG + 4UL) /*0xC6*/
#define EVENT_GROUP_WAIT_BITS_END (EVENTGROUP_EG + 5UL) /*0xC7*/
#define EVENT_GROUP_CLEAR_BITS (EVENTGROUP_EG + 6UL) /*0xC8*/
#define EVENT_GROUP_CLEAR_BITS_FROM_ISR (EVENTGROUP_EG + 7UL) /*0xC9*/
#define EVENT_GROUP_SET_BITS (EVENTGROUP_EG + 8UL) /*0xCA*/
#define EVENT_GROUP_DELETE (EVENTGROUP_EG + 9UL) /*0xCB*/
#define EVENT_GROUP_SYNC_END_FAILED (EVENTGROUP_EG + 10UL) /*0xCC*/
#define EVENT_GROUP_WAIT_BITS_END_FAILED (EVENTGROUP_EG + 11UL) /*0xCD*/
#define EVENT_GROUP_SET_BITS_FROM_ISR (EVENTGROUP_EG + 12UL) /*0xCE*/
#define EVENT_GROUP_SET_BITS_FROM_ISR_FAILED (EVENTGROUP_EG + 13UL) /*0xCF*/

/* Task instance boundary events */
#define TASK_INSTANCE_FINISHED_NEXT_KSE (EVENTGROUP_EG + 14UL) /*0xD0*/
#define TASK_INSTANCE_FINISHED_DIRECT (EVENTGROUP_EG + 15UL) /*0xD1*/

/* Task notification events */
#define TRACE_TASK_NOTIFY_GROUP (EVENTGROUP_EG + 16UL) /*0xD2*/
#define TRACE_TASK_NOTIFY (TRACE_TASK_NOTIFY_GROUP + 0UL) /*0xD2*/
#define TRACE_TASK_NOTIFY_TAKE (TRACE_TASK_NOTIFY_GROUP + 1UL) /*0xD3*/
#define TRACE_TASK_NOTIFY_TAKE_BLOCK (TRACE_TASK_NOTIFY_GROUP + 2UL) /*0xD4*/
#define TRACE_TASK_NOTIFY_TAKE_FAILED (TRACE_TASK_NOTIFY_GROUP + 3UL) /*0xD5*/
#define TRACE_TASK_NOTIFY_WAIT (TRACE_TASK_NOTIFY_GROUP + 4UL) /*0xD6*/
#define TRACE_TASK_NOTIFY_WAIT_BLOCK (TRACE_TASK_NOTIFY_GROUP + 5UL) /*0xD7*/
#define TRACE_TASK_NOTIFY_WAIT_FAILED (TRACE_TASK_NOTIFY_GROUP + 6UL) /*0xD8*/
#define TRACE_TASK_NOTIFY_FROM_ISR (TRACE_TASK_NOTIFY_GROUP + 7UL) /*0xD9*/
#define TRACE_TASK_NOTIFY_GIVE_FROM_ISR (TRACE_TASK_NOTIFY_GROUP + 8UL) /*0xDA*/
\r
/* Accessors for FreeRTOS kernel object fields used by the recorder hooks.
 * Field names (uxPriority, pcTaskName, uxTimerNumber, uxEventGroupNumber, ...)
 * must match the TCB/Timer_t/EventGroup_t layout of the configured FreeRTOS
 * version. */
#define TRACE_GET_TASK_PRIORITY(pxTCB) ((uint8_t)pxTCB->uxPriority)
#define TRACE_GET_TASK_NAME(pxTCB) ((char*)pxTCB->pcTaskName)
#define TRACE_GET_TASK_NUMBER(pxTCB) (prvTraceGetTaskNumber(pxTCB))
#define TRACE_SET_TASK_NUMBER(pxTCB) pxTCB->uxTaskNumber = prvTraceGetObjectHandle(TRACE_CLASS_TASK);

/* Map a kernel-level object class to the recorder's TRACE_CLASS_* value. */
#define TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass) TraceObjectClassTable[kernelClass]
#define TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject) TRACE_GET_CLASS_TRACE_CLASS(CLASS, prvTraceGetObjectType(pxObject))

/* A NULL timer handle maps to recorder handle 0. */
#define TRACE_GET_TIMER_NUMBER(tmr) (((uint32_t)tmr) != 0 ? ( ( traceHandle ) ((Timer_t*)tmr)->uxTimerNumber ) : 0)
#define TRACE_SET_TIMER_NUMBER(tmr) ((Timer_t*)tmr)->uxTimerNumber = prvTraceGetObjectHandle(TRACE_CLASS_TIMER);
#define TRACE_GET_TIMER_NAME(pxTimer) pxTimer->pcTimerName
#define TRACE_GET_TIMER_PERIOD(pxTimer) pxTimer->xTimerPeriodInTicks

#define TRACE_GET_EVENTGROUP_NUMBER(eg) ( ( traceHandle ) uxEventGroupGetNumber(eg) )

#define TRACE_SET_EVENTGROUP_NUMBER(eg) ((EventGroup_t*)eg)->uxEventGroupNumber = prvTraceGetObjectHandle(TRACE_CLASS_EVENTGROUP);

#define TRACE_GET_OBJECT_NUMBER(CLASS, pxObject) (prvTraceGetObjectNumber(pxObject))
\r
/* The queue-number field was renamed between FreeRTOS versions; select the
 * right field name. (Restored the #else/#endif missing from this copy.) */
#if (TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_8_X)
/* FreeRTOS v7.x: field is ucQueueNumber */
#define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) pxObject->ucQueueNumber = prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject));
#else
/* FreeRTOS v8.0 and later: field is uxQueueNumber */
#define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) pxObject->uxQueueNumber = prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject));
#endif

/* Build an event code from service + result base plus the object class
 * (class carried in the low bits, see the event group comments above). */
#define TRACE_GET_CLASS_EVENT_CODE(SERVICE, RESULT, CLASS, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass))
#define TRACE_GET_OBJECT_EVENT_CODE(SERVICE, RESULT, CLASS, pxObject) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject))
#define TRACE_GET_TASK_EVENT_CODE(SERVICE, RESULT, CLASS, pxTCB) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_CLASS_TASK)
\r
/*** The trace macros for snapshot mode **************************************/

/* Store LOW_POWER_BEGIN/LOW_POWER_END events around tickless idle and pause
 * timestamping while the tick is stopped. (Restored the macro-body brace
 * lines that were missing from this copy of the file.) */
#ifdef configUSE_TICKLESS_IDLE
#if (configUSE_TICKLESS_IDLE != 0)

#undef traceLOW_POWER_IDLE_BEGIN
#define traceLOW_POWER_IDLE_BEGIN() \
	{ \
		extern uint32_t trace_disable_timestamp; \
		prvTraceStoreLowPower(0); \
		trace_disable_timestamp = 1; \
	}

#undef traceLOW_POWER_IDLE_END
#define traceLOW_POWER_IDLE_END() \
	{ \
		extern uint32_t trace_disable_timestamp; \
		trace_disable_timestamp = 0; \
		prvTraceStoreLowPower(1); \
	}

#endif /* configUSE_TICKLESS_IDLE != 0 */
#endif /* configUSE_TICKLESS_IDLE */
\r
/* A macro that will update the tick count when returning from tickless idle */
#undef traceINCREASE_TICK_COUNT
/* Intentionally defined empty - no event is stored for this hook. */
#define traceINCREASE_TICK_COUNT( xCount )

/* Called for each task that becomes ready */
#undef traceMOVED_TASK_TO_READY_STATE
#define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \
	trcKERNEL_HOOKS_MOVED_TASK_TO_READY_STATE(pxTCB);
\r
/* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. */
#undef traceTASK_INCREMENT_TICK

/* The pended-tick counter was renamed between FreeRTOS versions
 * (uxMissedTicks in v7.3/7.4, uxPendedTicks in v7.5 and later).
 * (Restored the #else/#endif missing from this copy.) */
#if (TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_7_3_OR_7_4)

#define traceTASK_INCREMENT_TICK( xTickCount ) \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxMissedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); }

#else

#define traceTASK_INCREMENT_TICK( xTickCount ) \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxPendedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); }

#endif
\r
/* Called on each task-switch */
#undef traceTASK_SWITCHED_IN
#define traceTASK_SWITCHED_IN() \
	trcKERNEL_HOOKS_TASK_SWITCH(TRACE_GET_CURRENT_TASK());

/* Called on vTaskSuspend */
#undef traceTASK_SUSPEND
#define traceTASK_SUSPEND( pxTaskToSuspend ) \
	trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend);

/* Called from special case with timer only */
#undef traceTASK_DELAY_SUSPEND
#define traceTASK_DELAY_SUSPEND( pxTaskToSuspend ) \
	trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

/* Called on vTaskDelay - note the use of FreeRTOS variable xTicksToDelay */
#undef traceTASK_DELAY
#define traceTASK_DELAY() \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY, pxCurrentTCB, xTicksToDelay); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

/* Called on vTaskDelayUntil - note the use of FreeRTOS variable xTimeToWake */
#undef traceTASK_DELAY_UNTIL
/* In FreeRTOS v9.x the hook receives xTimeToWake as a parameter; earlier
 * versions take no parameter and rely on the local variable in tasks.c. */
#if TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X
#define traceTASK_DELAY_UNTIL(xTimeToWake) \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY_UNTIL, pxCurrentTCB, xTimeToWake); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();
#else /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */
#define traceTASK_DELAY_UNTIL() \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY_UNTIL, pxCurrentTCB, xTimeToWake); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();
#endif /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */
\r
#if (TRC_CFG_INCLUDE_OBJECT_DELETE == 1)

/* Called on vTaskDelete */
#undef traceTASK_DELETE
#define traceTASK_DELETE( pxTaskToDelete ) \
	{ TRACE_ALLOC_CRITICAL_SECTION(); \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_TASK_DELETE(DELETE_OBJ, pxTaskToDelete); \
	TRACE_EXIT_CRITICAL_SECTION(); }

/* Called on vQueueDelete */
#undef traceQUEUE_DELETE
#define traceQUEUE_DELETE( pxQueue ) \
	{ TRACE_ALLOC_CRITICAL_SECTION(); \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_OBJECT_DELETE(DELETE_OBJ, TRC_UNUSED, pxQueue); \
	TRACE_EXIT_CRITICAL_SECTION(); }

#endif /* (TRC_CFG_INCLUDE_OBJECT_DELETE == 1) */
\r
/* Called on vTaskCreate */
#undef traceTASK_CREATE
#define traceTASK_CREATE(pxNewTCB) \
	if (pxNewTCB != NULL) \
	{ \
		trcKERNEL_HOOKS_TASK_CREATE(CREATE_OBJ, TRC_UNUSED, pxNewTCB); \
	}

/* Called in vTaskCreate, if it fails (typically if the stack can not be allocated) */
#undef traceTASK_CREATE_FAILED
#define traceTASK_CREATE_FAILED() \
	trcKERNEL_HOOKS_TASK_CREATE_FAILED(CREATE_OBJ, TRC_UNUSED);

/* Called in xQueueCreate, and thereby for all other objects based on queues, such as semaphores. */
#undef traceQUEUE_CREATE
#define traceQUEUE_CREATE( pxNewQueue ) \
	trcKERNEL_HOOKS_OBJECT_CREATE(CREATE_OBJ, TRC_UNUSED, pxNewQueue);

/* Called in xQueueCreate, if the queue creation fails */
#undef traceQUEUE_CREATE_FAILED
#define traceQUEUE_CREATE_FAILED( queueType ) \
	trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE_OBJ, TRC_UNUSED, queueType);
\r
/* This macro is not necessary as of FreeRTOS v9.0.0 */
#if (TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_X)

/* Called in xQueueCreateMutex, and thereby also from xSemaphoreCreateMutex and xSemaphoreCreateRecursiveMutex */
#undef traceCREATE_MUTEX
#define traceCREATE_MUTEX( pxNewQueue ) \
	trcKERNEL_HOOKS_OBJECT_CREATE(CREATE_OBJ, TRC_UNUSED, pxNewQueue);

/* Called in xQueueCreateMutex when the operation fails (when memory allocation fails) */
#undef traceCREATE_MUTEX_FAILED
#define traceCREATE_MUTEX_FAILED() \
	trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE_OBJ, TRC_UNUSED, queueQUEUE_TYPE_MUTEX);

#endif /* (TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_X) */
\r
/* Called when the Mutex can not be given, since not holder */
#undef traceGIVE_MUTEX_RECURSIVE_FAILED
#define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, TRC_UNUSED, pxMutex);

/* Called when a message is sent to a queue */ /* CS IS NEW ! */
#undef traceQUEUE_SEND
#define traceQUEUE_SEND( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, SUCCESS, TRC_UNUSED, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(TRC_UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(TRC_UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? (uint8_t)0 : (uint8_t)(pxQueue->uxMessagesWaiting + 1));

/* Called when a message failed to be sent to a queue (timeout) */
#undef traceQUEUE_SEND_FAILED
#define traceQUEUE_SEND_FAILED( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, TRC_UNUSED, pxQueue);

/* Called when the task is blocked due to a send operation on a full queue */
#undef traceBLOCKING_ON_QUEUE_SEND
#define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, BLOCK, TRC_UNUSED, pxQueue);

/* Called when a message is received from a queue. For a mutex, the stored
   object state is the holder's task number; otherwise the new message count. */
#undef traceQUEUE_RECEIVE
#define traceQUEUE_RECEIVE( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, SUCCESS, TRC_UNUSED, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(TRC_UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(TRC_UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? (uint8_t)TRACE_GET_TASK_NUMBER(TRACE_GET_CURRENT_TASK()) : (uint8_t)(pxQueue->uxMessagesWaiting - 1));

/* Called when a receive operation on a queue fails (timeout) */
#undef traceQUEUE_RECEIVE_FAILED
#define traceQUEUE_RECEIVE_FAILED( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, FAILED, TRC_UNUSED, pxQueue);

/* Called when the task is blocked due to a receive operation on an empty queue.
   For mutexes the task instance is not marked finished, since blocking on a
   mutex is not the end of a task instance. */
#undef traceBLOCKING_ON_QUEUE_RECEIVE
#define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, BLOCK, TRC_UNUSED, pxQueue); \
	if (TRACE_GET_OBJECT_TRACE_CLASS(TRC_UNUSED, pxQueue) != TRACE_CLASS_MUTEX) \
	{trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();}
\r
/* Called on xQueuePeek */
#undef traceQUEUE_PEEK
#define traceQUEUE_PEEK( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(PEEK, SUCCESS, TRC_UNUSED, pxQueue);

/* Called when a message is sent from interrupt context, e.g., using xQueueSendFromISR */
#undef traceQUEUE_SEND_FROM_ISR
#define traceQUEUE_SEND_FROM_ISR( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, SUCCESS, TRC_UNUSED, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(TRC_UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting + 1));

/* Called when a message send from interrupt context fails (since the queue was full) */
#undef traceQUEUE_SEND_FROM_ISR_FAILED
#define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, FAILED, TRC_UNUSED, pxQueue);

/* Called when a message is received in interrupt context, e.g., using xQueueReceiveFromISR */
#undef traceQUEUE_RECEIVE_FROM_ISR
#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, SUCCESS, TRC_UNUSED, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(TRC_UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting - 1));

/* Called when a message receive from interrupt context fails (since the queue was empty) */
#undef traceQUEUE_RECEIVE_FROM_ISR_FAILED
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, FAILED, TRC_UNUSED, pxQueue);

/* Called in vTaskPrioritySet */
#undef traceTASK_PRIORITY_SET
#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \
	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_SET, pxTask, uxNewPriority);

/* Called in vTaskPriorityInherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_INHERIT
#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \
	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_INHERIT, pxTask, uxNewPriority);

/* Called in vTaskPriorityDisinherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_DISINHERIT
#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \
	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_DISINHERIT, pxTask, uxNewPriority);

/* Called in vTaskResume */
#undef traceTASK_RESUME
#define traceTASK_RESUME( pxTaskToResume ) \
	trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME, pxTaskToResume);

/* Called in vTaskResumeFromISR */
#undef traceTASK_RESUME_FROM_ISR
#define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \
	trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME_FROM_ISR, pxTaskToResume);
\r
#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X)

#if (TRC_CFG_SCHEDULING_ONLY == 0) && (TRC_CFG_INCLUDE_MEMMANG_EVENTS == 1)

extern void vTraceStoreMemMangEvent(uint32_t ecode, uint32_t address, int32_t size);

/* Called on pvPortMalloc; NULL results (failed allocations) are not stored */
#define traceMALLOC( pvAddress, uiSize ) {if (pvAddress != 0) vTraceStoreMemMangEvent(MEM_MALLOC_SIZE, ( uint32_t ) pvAddress, (int32_t)uiSize); }

/* Called on vPortFree; the size is stored negated to mark a release */
#define traceFREE( pvAddress, uiSize ) {vTraceStoreMemMangEvent(MEM_FREE_SIZE, ( uint32_t ) pvAddress, (int32_t)(-uiSize)); }

#endif /* (TRC_CFG_SCHEDULING_ONLY == 0) && (TRC_CFG_INCLUDE_MEMMANG_EVENTS == 1) */

/* Called in timer.c - xTimerCreate */
#undef traceTIMER_CREATE
#define traceTIMER_CREATE(tmr) \
	trcKERNEL_HOOKS_TIMER_CREATE(TIMER_CREATE, tmr);

#undef traceTIMER_CREATE_FAILED
#define traceTIMER_CREATE_FAILED() \
	trcKERNEL_HOOKS_TIMER_EVENT(TIMER_CREATE_FAILED, 0);

/* Note that xCommandID can never be tmrCOMMAND_EXECUTE_CALLBACK (-1) since the trace macro is not called in that case */
#undef traceTIMER_COMMAND_SEND
#define traceTIMER_COMMAND_SEND(tmr, xCommandID, xOptionalValue, xReturn) \
	if (xCommandID > tmrCOMMAND_START_DONT_TRACE){\
		if (xCommandID == tmrCOMMAND_CHANGE_PERIOD){ prvTraceStoreKernelCallWithParam((xReturn == pdPASS) ? TIMER_CHANGE_PERIOD : TIMER_CHANGE_PERIOD_FAILED, TRACE_CLASS_TIMER, TRACE_GET_TIMER_NUMBER(tmr), xOptionalValue);}\
		else if ((xCommandID == tmrCOMMAND_DELETE) && (xReturn == pdPASS)){ trcKERNEL_HOOKS_TIMER_DELETE(TIMER_DELETE, tmr); } \
		else {trcKERNEL_HOOKS_TIMER_EVENT(EVENTGROUP_TIMER + (uint32_t)xCommandID + ((xReturn == pdPASS)?0:(TIMER_CREATE_FAILED - TIMER_CREATE)), tmr); }\
	}

#undef tracePEND_FUNC_CALL
#define tracePEND_FUNC_CALL(func, arg1, arg2, ret) \
	if (ret == pdPASS){ \
		prvTraceStoreKernelCall(PEND_FUNC_CALL, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle()) ); \
	}else{ \
		prvTraceStoreKernelCall(PEND_FUNC_CALL_FAILED, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle()) );}

#undef tracePEND_FUNC_CALL_FROM_ISR
#define tracePEND_FUNC_CALL_FROM_ISR(func, arg1, arg2, ret) \
	if (! uiInEventGroupSetBitsFromISR){ prvTraceStoreKernelCall(PEND_FUNC_CALL_FROM_ISR, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle()) ); } \
	uiInEventGroupSetBitsFromISR = 0;

#endif /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X) */
\r
/* Called in xEventGroupCreate */
#undef traceEVENT_GROUP_CREATE
#define traceEVENT_GROUP_CREATE(eg) \
	TRACE_SET_EVENTGROUP_NUMBER(eg); \
	prvTraceStoreKernelCall(EVENT_GROUP_CREATE, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg));

/* Called in vEventGroupDelete; stores the close event and releases the trace handle */
#undef traceEVENT_GROUP_DELETE
#define traceEVENT_GROUP_DELETE(eg) \
	prvTraceStoreKernelCall(EVENT_GROUP_DELETE, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg)); \
	prvTraceStoreObjectNameOnCloseEvent(TRACE_GET_EVENTGROUP_NUMBER(eg), TRACE_CLASS_EVENTGROUP); \
	prvTraceStoreObjectPropertiesOnCloseEvent(TRACE_GET_EVENTGROUP_NUMBER(eg), TRACE_CLASS_EVENTGROUP); \
	prvTraceFreeObjectHandle(TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg));

#undef traceEVENT_GROUP_CREATE_FAILED
#define traceEVENT_GROUP_CREATE_FAILED() \
	prvTraceStoreKernelCall(EVENT_GROUP_CREATE_FAILED, TRACE_CLASS_EVENTGROUP, 0);

#undef traceEVENT_GROUP_SYNC_BLOCK
#define traceEVENT_GROUP_SYNC_BLOCK(eg, bitsToSet, bitsToWaitFor) \
	prvTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_BLOCK, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor);

/* A timeout is stored as the FAILED variant of the sync-end event */
#undef traceEVENT_GROUP_SYNC_END
#define traceEVENT_GROUP_SYNC_END(eg, bitsToSet, bitsToWaitFor, wasTimeout) \
	if (wasTimeout){ prvTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_END_FAILED, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor);} \
	else{ prvTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_END, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); }

#undef traceEVENT_GROUP_WAIT_BITS_BLOCK
#define traceEVENT_GROUP_WAIT_BITS_BLOCK(eg, bitsToWaitFor) \
	prvTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_BLOCK, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

#undef traceEVENT_GROUP_WAIT_BITS_END
#define traceEVENT_GROUP_WAIT_BITS_END(eg, bitsToWaitFor, wasTimeout) \
	if (wasTimeout){ prvTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_END_FAILED, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); } \
	else{ prvTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_END, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); }

/* Clearing zero bits is not stored */
#undef traceEVENT_GROUP_CLEAR_BITS
#define traceEVENT_GROUP_CLEAR_BITS(eg, bitsToClear) \
	if (bitsToClear) prvTraceStoreKernelCallWithParam(EVENT_GROUP_CLEAR_BITS, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToClear);

#undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(eg, bitsToClear) \
	if (bitsToClear) prvTraceStoreKernelCallWithParam(EVENT_GROUP_CLEAR_BITS_FROM_ISR, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToClear);

#undef traceEVENT_GROUP_SET_BITS
#define traceEVENT_GROUP_SET_BITS(eg, bitsToSet) \
	prvTraceStoreKernelCallWithParam(EVENT_GROUP_SET_BITS, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToSet);

/* The flag is read (and reset) by tracePEND_FUNC_CALL_FROM_ISR */
#undef traceEVENT_GROUP_SET_BITS_FROM_ISR
#define traceEVENT_GROUP_SET_BITS_FROM_ISR(eg, bitsToSet) \
	prvTraceStoreKernelCallWithParam(EVENT_GROUP_SET_BITS_FROM_ISR, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToSet); \
	uiInEventGroupSetBitsFromISR = 1;
\r
/* Called in ulTaskNotifyTake. The notify-state field was renamed between
   FreeRTOS v8.x (eNotifyState/eNotified) and v9.x (ucNotifyState). */
#undef traceTASK_NOTIFY_TAKE
#if (TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_X)
#define traceTASK_NOTIFY_TAKE() \
	if (pxCurrentTCB->eNotifyState == eNotified){ \
		prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_TAKE, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	}else{ \
		prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_TAKE_FAILED, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait);}
#else /* TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_X */
#define traceTASK_NOTIFY_TAKE() \
	if (pxCurrentTCB->ucNotifyState == taskNOTIFICATION_RECEIVED){ \
		prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_TAKE, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	}else{ \
		prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_TAKE_FAILED, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait);}
#endif /* TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_X */
\r
#undef traceTASK_NOTIFY_TAKE_BLOCK
#define traceTASK_NOTIFY_TAKE_BLOCK() \
	prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_TAKE_BLOCK, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

/* Called in xTaskNotifyWait. The notify-state field was renamed between
   FreeRTOS v8.x (eNotifyState/eNotified) and v9.x (ucNotifyState). */
#undef traceTASK_NOTIFY_WAIT
#if (TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_X)
#define traceTASK_NOTIFY_WAIT() \
	if (pxCurrentTCB->eNotifyState == eNotified){ \
		prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	}else{ \
		prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_FAILED, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait);}
#else /* TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_X */
#define traceTASK_NOTIFY_WAIT() \
	if (pxCurrentTCB->ucNotifyState == taskNOTIFICATION_RECEIVED){ \
		prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	}else{ \
		prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_FAILED, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); }
#endif /* TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_X */

#undef traceTASK_NOTIFY_WAIT_BLOCK
#define traceTASK_NOTIFY_WAIT_BLOCK() \
	prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_BLOCK, TRACE_CLASS_TASK, uxTaskGetTaskNumber(pxCurrentTCB), xTicksToWait); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

#undef traceTASK_NOTIFY
#define traceTASK_NOTIFY() \
	prvTraceStoreKernelCall(TRACE_TASK_NOTIFY, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTaskToNotify));

#undef traceTASK_NOTIFY_FROM_ISR
#define traceTASK_NOTIFY_FROM_ISR() \
	prvTraceStoreKernelCall(TRACE_TASK_NOTIFY_FROM_ISR, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTaskToNotify));

#undef traceTASK_NOTIFY_GIVE_FROM_ISR
#define traceTASK_NOTIFY_GIVE_FROM_ISR() \
	prvTraceStoreKernelCall(TRACE_TASK_NOTIFY_GIVE_FROM_ISR, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTaskToNotify));

/* Called in vQueueAddToRegistry; uses the registry name as the trace object name */
#undef traceQUEUE_REGISTRY_ADD
#define traceQUEUE_REGISTRY_ADD(object, name) prvTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(TRC_UNUSED, object), TRACE_GET_OBJECT_NUMBER(TRC_UNUSED, object), name);
\r
/*******************************************************************************
* (macro) vTraceExcludeQueue(object)
*
* Parameter object: pointer to the Queue object that shall be excluded.
*
* Excludes all operations on this object from the trace. Allows for capturing
* longer traces in the snapshot RAM buffer by filtering out irrelevant events.
*
* Note: Only for snapshot mode.
******************************************************************************/
#define vTraceExcludeQueue(object) \
	TRACE_SET_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(TRC_UNUSED, object));

/*******************************************************************************
* (macro) vTraceExcludeSemaphore(object)
*
* Parameter object: pointer to the Semaphore object that shall be excluded.
*
* Excludes all operations on this object from the trace. Allows for capturing
* longer traces in the snapshot RAM buffer by filtering out irrelevant events.
*
* Note: Only for snapshot mode.
******************************************************************************/
#define vTraceExcludeSemaphore(object) \
	TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(TRC_UNUSED, object));

/*******************************************************************************
* (macro) vTraceExcludeMutex(object)
*
* Parameter object: pointer to the Mutex object that shall be excluded.
*
* Excludes all operations on this object from the trace. Allows for capturing
* longer traces in the snapshot RAM buffer by filtering out irrelevant events.
*
* Note: Only for snapshot mode.
******************************************************************************/
#define vTraceExcludeMutex(object) \
	TRACE_SET_MUTEX_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(TRC_UNUSED, object));

/*******************************************************************************
* (macro) vTraceExcludeTimer(object)
*
* Parameter object: pointer to the Timer object that shall be excluded.
*
* Excludes all operations on this object from the trace. Allows for capturing
* longer traces in the snapshot RAM buffer by filtering out irrelevant events.
*
* Note: Only for snapshot mode.
******************************************************************************/
#define vTraceExcludeTimer(object) \
	TRACE_SET_TIMER_FLAG_ISEXCLUDED(TRACE_GET_TIMER_NUMBER(object));

/*******************************************************************************
* (macro) vTraceExcludeEventGroup(object)
*
* Parameter object: pointer to the Event Group object that shall be excluded.
*
* Excludes all operations on this object from the trace. Allows for capturing
* longer traces in the snapshot RAM buffer by filtering out irrelevant events.
*
* Note: Only for snapshot mode.
******************************************************************************/
#define vTraceExcludeEventGroup(object) \
	TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(TRACE_GET_EVENTGROUP_NUMBER(object));

/*******************************************************************************
* (macro) vTraceExcludeTask(object)
*
* Parameter object: pointer to the Task object that shall be excluded.
*
* Excludes all events from the specified task. Allows for capturing
* longer traces in the snapshot RAM buffer by filtering out irrelevant events.
*
* Excluding tasks is problematic as the previous task will appear to continue
* executing while the excluded task is in fact executing. This therefore affects
* the timing statistics in an unpredictable way.
* Moreover, any operations on queues, semaphores, etc. made by an excluded task
* will also be excluded, so Tracealyzer will give an incorrect display regarding
* the states of these objects (number of messages in a queue, etc.).
*
* This should only be used on short tasks that don't affect other kernel objects.
*
* Note: Only for snapshot mode.
******************************************************************************/
#define vTraceExcludeTask(object) \
	TRACE_SET_TASK_FLAG_ISEXCLUDED(TRACE_GET_TASK_NUMBER(object));

/******************************************************************************
* (macro) vTraceExcludeDelays()
*
* Excludes all Delay operations from the trace. Allows for capturing
* longer traces in the snapshot RAM buffer.
*
* Note: Only for snapshot mode.
*****************************************************************************/
#define vTraceExcludeDelays() \
	TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY); \
	TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY_UNTIL);
\r
/*** Private helper macros for exclude functionality ************************/

/* Each object class occupies a contiguous index range in trcExcludedObjects;
   the offsets below are the running sums of the preceding class capacities. */

#define TRACE_SET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_SET_FLAG_ISEXCLUDED(trcExcludedObjects, queueIndex)
#define TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(trcExcludedObjects, queueIndex)
#define TRACE_GET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_GET_FLAG_ISEXCLUDED(trcExcludedObjects, queueIndex)

#define TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_SET_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+semaphoreIndex)
#define TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+semaphoreIndex)
#define TRACE_GET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_GET_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+semaphoreIndex)

#define TRACE_SET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_SET_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+TRC_CFG_NSEMAPHORE+1+mutexIndex)
#define TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+TRC_CFG_NSEMAPHORE+1+mutexIndex)
#define TRACE_GET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_GET_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+TRC_CFG_NSEMAPHORE+1+mutexIndex)

#define TRACE_SET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_SET_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+TRC_CFG_NSEMAPHORE+1+TRC_CFG_NMUTEX+1+taskIndex)
#define TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+TRC_CFG_NSEMAPHORE+1+TRC_CFG_NMUTEX+1+taskIndex)
#define TRACE_GET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_GET_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+TRC_CFG_NSEMAPHORE+1+TRC_CFG_NMUTEX+1+taskIndex)

#define TRACE_SET_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_SET_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+TRC_CFG_NSEMAPHORE+1+TRC_CFG_NMUTEX+1+TRC_CFG_NTASK+1+timerIndex)
#define TRACE_CLEAR_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+TRC_CFG_NSEMAPHORE+1+TRC_CFG_NMUTEX+1+TRC_CFG_NTASK+1+timerIndex)
#define TRACE_GET_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_GET_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+TRC_CFG_NSEMAPHORE+1+TRC_CFG_NMUTEX+1+TRC_CFG_NTASK+1+timerIndex)

#define TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_SET_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+TRC_CFG_NSEMAPHORE+1+TRC_CFG_NMUTEX+1+TRC_CFG_NTASK+1+TRC_CFG_NTIMER+1+egIndex)
#define TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+TRC_CFG_NSEMAPHORE+1+TRC_CFG_NMUTEX+1+TRC_CFG_NTASK+1+TRC_CFG_NTIMER+1+egIndex)
#define TRACE_GET_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_GET_FLAG_ISEXCLUDED(trcExcludedObjects, TRC_CFG_NQUEUE+1+TRC_CFG_NSEMAPHORE+1+TRC_CFG_NMUTEX+1+TRC_CFG_NTASK+1+TRC_CFG_NTIMER+1+egIndex)
\r
/* Dispatch the exclude-flag clear/set to the per-class helper, given a trace class */
#define TRACE_CLEAR_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
	switch (objectclass) \
	{ \
	case TRACE_CLASS_QUEUE: \
		TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(handle); \
		break; \
	case TRACE_CLASS_SEMAPHORE: \
		TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(handle); \
		break; \
	case TRACE_CLASS_MUTEX: \
		TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(handle); \
		break; \
	case TRACE_CLASS_TASK: \
		TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(handle); \
		break; \
	case TRACE_CLASS_TIMER: \
		TRACE_CLEAR_TIMER_FLAG_ISEXCLUDED(handle); \
		break; \
	case TRACE_CLASS_EVENTGROUP: \
		TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(handle); \
		break; \
	default: \
		break; \
	}

#define TRACE_SET_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
	switch (objectclass) \
	{ \
	case TRACE_CLASS_QUEUE: \
		TRACE_SET_QUEUE_FLAG_ISEXCLUDED(handle); \
		break; \
	case TRACE_CLASS_SEMAPHORE: \
		TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(handle); \
		break; \
	case TRACE_CLASS_MUTEX: \
		TRACE_SET_MUTEX_FLAG_ISEXCLUDED(handle); \
		break; \
	case TRACE_CLASS_TASK: \
		TRACE_SET_TASK_FLAG_ISEXCLUDED(handle); \
		break; \
	case TRACE_CLASS_TIMER: \
		TRACE_SET_TIMER_FLAG_ISEXCLUDED(handle); \
		break; \
	case TRACE_CLASS_EVENTGROUP: \
		TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(handle); \
		break; \
	default: \
		break; \
	}
\r
1095 #endif /*#if TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT */
\r
1097 /******************************************************************************/
\r
1098 /*** Definitions for Streaming mode *******************************************/
\r
1099 /******************************************************************************/
\r
1100 #if (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING)
\r
/*******************************************************************************
* vTraceStoreKernelObjectName
*
* Set the name for a kernel object (defined by its address).
******************************************************************************/
void vTraceStoreKernelObjectName(void* object, const char* name);

/*******************************************************************************
* prvTraceOnBegin
*
* Called on trace begin.
******************************************************************************/
void prvTraceOnBegin(void);

/*******************************************************************************
* prvTraceOnEnd
*
* Called on trace end.
******************************************************************************/
void prvTraceOnEnd(void);

/*******************************************************************************
* prvIsNewTCB
*
* Tells if this task is already executing, or if there has been a task-switch.
* Assumed to be called within a trace hook in kernel context.
*******************************************************************************/
uint32_t prvIsNewTCB(void* pNewTCB);

#define TRACE_GET_CURRENT_TASK() prvTraceGetCurrentTaskHandle()
\r
1133 /*************************************************************************/
\r
1134 /* KERNEL SPECIFIC OBJECT CONFIGURATION */
\r
1135 /*************************************************************************/
\r
1137 /*******************************************************************************
\r
1138 * The event codes - should match the offline config file.
\r
1139 ******************************************************************************/
\r
/*** Event codes for streaming - should match the Tracealyzer config file *****/
#define PSF_EVENT_NULL_EVENT								0x00

#define PSF_EVENT_TRACE_START								0x01
#define PSF_EVENT_TS_CONFIG									0x02
#define PSF_EVENT_OBJ_NAME									0x03
#define PSF_EVENT_TASK_PRIORITY								0x04
#define PSF_EVENT_TASK_PRIO_INHERIT							0x05
#define PSF_EVENT_TASK_PRIO_DISINHERIT						0x06
#define PSF_EVENT_DEFINE_ISR								0x07

/* Object creation, 0x10-0x17 */
#define PSF_EVENT_TASK_CREATE								0x10
#define PSF_EVENT_QUEUE_CREATE								0x11
#define PSF_EVENT_SEMAPHORE_BINARY_CREATE					0x12
#define PSF_EVENT_MUTEX_CREATE								0x13
#define PSF_EVENT_TIMER_CREATE								0x14
#define PSF_EVENT_EVENTGROUP_CREATE							0x15
#define PSF_EVENT_SEMAPHORE_COUNTING_CREATE					0x16
#define PSF_EVENT_MUTEX_RECURSIVE_CREATE					0x17

/* Object deletion, 0x20-0x25 */
#define PSF_EVENT_TASK_DELETE								0x20
#define PSF_EVENT_QUEUE_DELETE								0x21
#define PSF_EVENT_SEMAPHORE_DELETE							0x22
#define PSF_EVENT_MUTEX_DELETE								0x23
#define PSF_EVENT_TIMER_DELETE								0x24
#define PSF_EVENT_EVENTGROUP_DELETE							0x25

/* Scheduler and timestamping events, 0x30-0x3D */
#define PSF_EVENT_TASK_READY								0x30
#define PSF_EVENT_NEW_TIME									0x31
#define PSF_EVENT_NEW_TIME_SCHEDULER_SUSPENDED				0x32
#define PSF_EVENT_ISR_BEGIN									0x33
#define PSF_EVENT_ISR_RESUME								0x34
#define PSF_EVENT_TS_BEGIN									0x35
#define PSF_EVENT_TS_RESUME									0x36
#define PSF_EVENT_TASK_ACTIVATE								0x37

#define PSF_EVENT_MALLOC									0x38
#define PSF_EVENT_FREE										0x39

#define PSF_EVENT_LOWPOWER_BEGIN							0x3A
#define PSF_EVENT_LOWPOWER_END								0x3B

#define PSF_EVENT_IFE_NEXT									0x3C
#define PSF_EVENT_IFE_DIRECT								0x3D

/* Failed object creation, 0x40-0x48 */
#define PSF_EVENT_TASK_CREATE_FAILED						0x40
#define PSF_EVENT_QUEUE_CREATE_FAILED						0x41
#define PSF_EVENT_SEMAPHORE_BINARY_CREATE_FAILED			0x42
#define PSF_EVENT_MUTEX_CREATE_FAILED						0x43
#define PSF_EVENT_TIMER_CREATE_FAILED						0x44
#define PSF_EVENT_EVENTGROUP_CREATE_FAILED					0x45
#define PSF_EVENT_SEMAPHORE_COUNTING_CREATE_FAILED			0x46
#define PSF_EVENT_MUTEX_RECURSIVE_CREATE_FAILED				0x47

#define PSF_EVENT_TIMER_DELETE_FAILED						0x48

/* Send/give operations, 0x50-0x5D */
#define PSF_EVENT_QUEUE_SEND								0x50
#define PSF_EVENT_SEMAPHORE_GIVE							0x51
#define PSF_EVENT_MUTEX_GIVE								0x52

#define PSF_EVENT_QUEUE_SEND_FAILED							0x53
#define PSF_EVENT_SEMAPHORE_GIVE_FAILED						0x54
#define PSF_EVENT_MUTEX_GIVE_FAILED							0x55

#define PSF_EVENT_QUEUE_SEND_BLOCK							0x56
#define PSF_EVENT_SEMAPHORE_GIVE_BLOCK						0x57
#define PSF_EVENT_MUTEX_GIVE_BLOCK							0x58

#define PSF_EVENT_QUEUE_SEND_FROMISR						0x59
#define PSF_EVENT_SEMAPHORE_GIVE_FROMISR					0x5A

#define PSF_EVENT_QUEUE_SEND_FROMISR_FAILED					0x5C
#define PSF_EVENT_SEMAPHORE_GIVE_FROMISR_FAILED				0x5D

/* Receive/take operations, 0x60-0x6D */
#define PSF_EVENT_QUEUE_RECEIVE								0x60
#define PSF_EVENT_SEMAPHORE_TAKE							0x61
#define PSF_EVENT_MUTEX_TAKE								0x62

#define PSF_EVENT_QUEUE_RECEIVE_FAILED						0x63
#define PSF_EVENT_SEMAPHORE_TAKE_FAILED						0x64
#define PSF_EVENT_MUTEX_TAKE_FAILED							0x65

#define PSF_EVENT_QUEUE_RECEIVE_BLOCK						0x66
#define PSF_EVENT_SEMAPHORE_TAKE_BLOCK						0x67
#define PSF_EVENT_MUTEX_TAKE_BLOCK							0x68

#define PSF_EVENT_QUEUE_RECEIVE_FROMISR						0x69
#define PSF_EVENT_SEMAPHORE_TAKE_FROMISR					0x6A

#define PSF_EVENT_QUEUE_RECEIVE_FROMISR_FAILED				0x6C
#define PSF_EVENT_SEMAPHORE_TAKE_FROMISR_FAILED				0x6D

/* Peek operations, 0x70-0x78 */
#define PSF_EVENT_QUEUE_PEEK								0x70
#define PSF_EVENT_SEMAPHORE_PEEK							0x71	/* Will never be used */
#define PSF_EVENT_MUTEX_PEEK								0x72	/* Will never be used */

#define PSF_EVENT_QUEUE_PEEK_FAILED							0x73
#define PSF_EVENT_SEMAPHORE_PEEK_FAILED						0x74	/* Will never be used */
#define PSF_EVENT_MUTEX_PEEK_FAILED							0x75	/* Will never be used */

#define PSF_EVENT_QUEUE_PEEK_BLOCK							0x76
#define PSF_EVENT_SEMAPHORE_PEEK_BLOCK						0x77	/* Will never be used */
#define PSF_EVENT_MUTEX_PEEK_BLOCK							0x78	/* Will never be used */

/* Task state changes, 0x79-0x7D */
#define PSF_EVENT_TASK_DELAY_UNTIL							0x79
#define PSF_EVENT_TASK_DELAY								0x7A
#define PSF_EVENT_TASK_SUSPEND								0x7B
#define PSF_EVENT_TASK_RESUME								0x7C
#define PSF_EVENT_TASK_RESUME_FROMISR						0x7D

/* Timer daemon pend-function-call events, 0x80-0x83 */
#define PSF_EVENT_TIMER_PENDFUNCCALL						0x80
#define PSF_EVENT_TIMER_PENDFUNCCALL_FROMISR				0x81
#define PSF_EVENT_TIMER_PENDFUNCCALL_FAILED					0x82
#define PSF_EVENT_TIMER_PENDFUNCCALL_FROMISR_FAILED			0x83

#define PSF_EVENT_USER_EVENT								0x90

/* Timer commands, 0xA0-0xAC */
#define PSF_EVENT_TIMER_START								0xA0
#define PSF_EVENT_TIMER_RESET								0xA1
#define PSF_EVENT_TIMER_STOP								0xA2
#define PSF_EVENT_TIMER_CHANGEPERIOD						0xA3
#define PSF_EVENT_TIMER_START_FROMISR						0xA4
#define PSF_EVENT_TIMER_RESET_FROMISR						0xA5
#define PSF_EVENT_TIMER_STOP_FROMISR						0xA6
#define PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR				0xA7
#define PSF_EVENT_TIMER_START_FAILED						0xA8
#define PSF_EVENT_TIMER_RESET_FAILED						0xA9
#define PSF_EVENT_TIMER_STOP_FAILED							0xAA
#define PSF_EVENT_TIMER_CHANGEPERIOD_FAILED					0xAB
#define PSF_EVENT_TIMER_START_FROMISR_FAILED				0xAC
\r
1271 #define PSF_EVENT_TIMER_RESET_FROMISR_FAILED 0xAD
\r
1272 #define PSF_EVENT_TIMER_STOP_FROMISR_FAILED 0xAE
\r
1273 #define PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR_FAILED 0xAF
\r
1275 #define PSF_EVENT_EVENTGROUP_SYNC 0xB0
\r
1276 #define PSF_EVENT_EVENTGROUP_WAITBITS 0xB1
\r
1277 #define PSF_EVENT_EVENTGROUP_CLEARBITS 0xB2
\r
1278 #define PSF_EVENT_EVENTGROUP_CLEARBITS_FROMISR 0xB3
\r
1279 #define PSF_EVENT_EVENTGROUP_SETBITS 0xB4
\r
1280 #define PSF_EVENT_EVENTGROUP_SETBITS_FROMISR 0xB5
\r
1281 #define PSF_EVENT_EVENTGROUP_SYNC_BLOCK 0xB6
\r
1282 #define PSF_EVENT_EVENTGROUP_WAITBITS_BLOCK 0xB7
\r
1283 #define PSF_EVENT_EVENTGROUP_SYNC_FAILED 0xB8
\r
1284 #define PSF_EVENT_EVENTGROUP_WAITBITS_FAILED 0xB9
\r
1286 #define PSF_EVENT_QUEUE_SEND_FRONT 0xC0
\r
1287 #define PSF_EVENT_QUEUE_SEND_FRONT_FAILED 0xC1
\r
1288 #define PSF_EVENT_QUEUE_SEND_FRONT_BLOCK 0xC2
\r
1289 #define PSF_EVENT_QUEUE_SEND_FRONT_FROMISR 0xC3
\r
1290 #define PSF_EVENT_QUEUE_SEND_FRONT_FROMISR_FAILED 0xC4
\r
1291 #define PSF_EVENT_MUTEX_GIVE_RECURSIVE 0xC5
\r
1292 #define PSF_EVENT_MUTEX_GIVE_RECURSIVE_FAILED 0xC6
\r
1293 #define PSF_EVENT_MUTEX_TAKE_RECURSIVE 0xC7
\r
1294 #define PSF_EVENT_MUTEX_TAKE_RECURSIVE_FAILED 0xC8
\r
1296 #define PSF_EVENT_TASK_NOTIFY 0xC9
\r
1297 #define PSF_EVENT_TASK_NOTIFY_TAKE 0xCA
\r
1298 #define PSF_EVENT_TASK_NOTIFY_TAKE_BLOCK 0xCB
\r
1299 #define PSF_EVENT_TASK_NOTIFY_TAKE_FAILED 0xCC
\r
1300 #define PSF_EVENT_TASK_NOTIFY_WAIT 0xCD
\r
1301 #define PSF_EVENT_TASK_NOTIFY_WAIT_BLOCK 0xCE
\r
1302 #define PSF_EVENT_TASK_NOTIFY_WAIT_FAILED 0xCF
\r
1303 #define PSF_EVENT_TASK_NOTIFY_FROM_ISR 0xD0
\r
1304 #define PSF_EVENT_TASK_NOTIFY_GIVE_FROM_ISR 0xD1
\r
/*** The trace macros for streaming ******************************************/

#if (defined(configUSE_TICKLESS_IDLE) && configUSE_TICKLESS_IDLE != 0)

/* Called when entering tickless idle; logs the expected idle time (in ticks). */
#undef traceLOW_POWER_IDLE_BEGIN
#define traceLOW_POWER_IDLE_BEGIN() \
	{ \
		prvTraceStoreEvent1(PSF_EVENT_LOWPOWER_BEGIN, xExpectedIdleTime); \
	}

/* Called when leaving tickless idle. */
#undef traceLOW_POWER_IDLE_END
#define traceLOW_POWER_IDLE_END() \
	{ \
		prvTraceStoreEvent0(PSF_EVENT_LOWPOWER_END); \
	}

#endif /* (defined(configUSE_TICKLESS_IDLE) && configUSE_TICKLESS_IDLE != 0) */

/* A macro that will update the tick count when returning from tickless idle */
#undef traceINCREASE_TICK_COUNT
/* Note: This can handle time adjustments of max 2^32 ticks, i.e., 35 seconds at 120 MHz. Thus, tick-less idle periods longer than 2^32 ticks will appear "compressed" on the time line.*/
#define traceINCREASE_TICK_COUNT( xCount ) { extern uint32_t uiTraceTickCount; uiTraceTickCount += xCount; }
\r
/* Called for each task that becomes ready */
#undef traceMOVED_TASK_TO_READY_STATE
#define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \
	prvTraceStoreEvent1(PSF_EVENT_TASK_READY, (uint32_t)pxTCB);

/* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. */
#undef traceTASK_INCREMENT_TICK
#if TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_7_3_OR_7_4
/* FreeRTOS 7.3/7.4 names the deferred-tick counter "uxMissedTicks". */
#define traceTASK_INCREMENT_TICK( xTickCount ) \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxMissedTicks == 0) { extern uint32_t uiTraceTickCount; uiTraceTickCount++; } \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE) { prvTraceStoreEvent1(PSF_EVENT_NEW_TIME, (uint32_t)(xTickCount + 1)); }
#else /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_7_3_OR_7_4 */
/* Later FreeRTOS versions renamed the counter to "uxPendedTicks". */
#define traceTASK_INCREMENT_TICK( xTickCount ) \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxPendedTicks == 0) { extern uint32_t uiTraceTickCount; uiTraceTickCount++; } \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE) { prvTraceStoreEvent1(PSF_EVENT_NEW_TIME, (uint32_t)(xTickCount + 1)); }
#endif /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_7_3_OR_7_4 */
\r
/* Called on each task-switch */
#undef traceTASK_SWITCHED_IN
#define traceTASK_SWITCHED_IN() \
	if (prvIsNewTCB(pxCurrentTCB)) \
	{ \
		prvTraceStoreEvent2(PSF_EVENT_TASK_ACTIVATE, (uint32_t)pxCurrentTCB, pxCurrentTCB->uxPriority); \
	}

/* Called on vTaskSuspend */
#undef traceTASK_SUSPEND
#define traceTASK_SUSPEND( pxTaskToSuspend ) \
	prvTraceStoreEvent1(PSF_EVENT_TASK_SUSPEND, (uint32_t)pxTaskToSuspend);

/* Called on vTaskDelay - note the use of FreeRTOS variable xTicksToDelay */
#undef traceTASK_DELAY
#define traceTASK_DELAY() \
	prvTraceStoreEvent1(PSF_EVENT_TASK_DELAY, xTicksToDelay);

/* Called on vTaskDelayUntil - note the use of FreeRTOS variable xTimeToWake */
#undef traceTASK_DELAY_UNTIL
#if TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X
/* FreeRTOS v9.x passes the wake time as a macro argument. */
#define traceTASK_DELAY_UNTIL(xTimeToWake) \
	prvTraceStoreEvent1(PSF_EVENT_TASK_DELAY_UNTIL, (uint32_t)xTimeToWake);
#else /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */
/* Older versions rely on the local variable xTimeToWake in vTaskDelayUntil. */
#define traceTASK_DELAY_UNTIL() \
	prvTraceStoreEvent1(PSF_EVENT_TASK_DELAY_UNTIL, (uint32_t)xTimeToWake);
#endif /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */

/* Called on vTaskDelete - also drops the recorder's symbol/priority records for the task */
#undef traceTASK_DELETE
#define traceTASK_DELETE( pxTaskToDelete ) \
	prvTraceStoreEvent2(PSF_EVENT_TASK_DELETE, (uint32_t)pxTaskToDelete, (pxTaskToDelete != NULL) ? (pxTaskToDelete->uxPriority) : 0); \
	prvTraceDeleteSymbol(pxTaskToDelete); \
	prvTraceDeleteObjectData(pxTaskToDelete);
\r
/* Called on vQueueDelete - the event depends on the actual object kind behind the queue */
#undef traceQUEUE_DELETE
#define traceQUEUE_DELETE( pxQueue ) \
	switch (pxQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent2(PSF_EVENT_QUEUE_DELETE, (uint32_t)pxQueue, (pxQueue != NULL) ? (pxQueue->uxMessagesWaiting) : 0); \
			break; \
		case queueQUEUE_TYPE_MUTEX: \
		case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
			prvTraceStoreEvent2(PSF_EVENT_MUTEX_DELETE, (uint32_t)pxQueue, (pxQueue != NULL) ? (pxQueue->uxMessagesWaiting) : 0); \
			break; \
		case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
			prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_DELETE, (uint32_t)pxQueue, (pxQueue != NULL) ? (pxQueue->uxMessagesWaiting) : 0); \
			break; \
	} \
	prvTraceDeleteSymbol(pxQueue);
\r
/* Called on vTaskCreate - registers the task's name and priority with the recorder */
#undef traceTASK_CREATE
#if TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X
/* v9.x: name/priority are read from the new TCB. */
#define traceTASK_CREATE(pxNewTCB) \
	if (pxNewTCB != NULL) \
	{ \
		prvTraceSaveSymbol(pxNewTCB, pxNewTCB->pcTaskName); \
		prvTraceSaveObjectData(pxNewTCB, pxNewTCB->uxPriority); \
		prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, pxNewTCB->pcTaskName, pxNewTCB); \
		prvTraceStoreEvent2(PSF_EVENT_TASK_CREATE, (uint32_t)pxNewTCB, pxNewTCB->uxPriority); \
	}
#else /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */
/* Pre-9.x: uses the xTaskCreate parameters pcName/uxPriority in scope. */
#define traceTASK_CREATE(pxNewTCB) \
	if (pxNewTCB != NULL) \
	{ \
		prvTraceSaveSymbol(pxNewTCB, (const char*)pcName); \
		prvTraceSaveObjectData(pxNewTCB, uxPriority); \
		prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, pcName, pxNewTCB); \
		prvTraceStoreEvent2(PSF_EVENT_TASK_CREATE, (uint32_t)pxNewTCB, uxPriority); \
	}
#endif /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */

/* Called in vTaskCreate, if it fails (typically if the stack can not be allocated) */
#undef traceTASK_CREATE_FAILED
#define traceTASK_CREATE_FAILED() \
	prvTraceStoreEvent0(PSF_EVENT_TASK_CREATE_FAILED);
\r
/* Called in xQueueCreate, and thereby for all other object based on queues, such as semaphores. */
#undef traceQUEUE_CREATE
#define traceQUEUE_CREATE( pxNewQueue )\
	switch (pxNewQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent2(PSF_EVENT_QUEUE_CREATE, (uint32_t)pxNewQueue, pxNewQueue->uxLength); \
			break; \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
			prvTraceStoreEvent1(PSF_EVENT_SEMAPHORE_BINARY_CREATE, (uint32_t)pxNewQueue); \
			break; \
	}

/* Called in xQueueCreate, if the queue creation fails */
#undef traceQUEUE_CREATE_FAILED
#define traceQUEUE_CREATE_FAILED( queueType ) \
	switch (queueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent0(PSF_EVENT_QUEUE_CREATE_FAILED); \
			break; \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
			prvTraceStoreEvent0(PSF_EVENT_SEMAPHORE_BINARY_CREATE_FAILED); \
			break; \
	}
\r
/* Called in xQueueCreateCountingSemaphore */
#undef traceCREATE_COUNTING_SEMAPHORE
#if TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_8_X || TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X
#define traceCREATE_COUNTING_SEMAPHORE() \
	prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_COUNTING_CREATE, (uint32_t)xHandle, ((Queue_t *) xHandle)->uxMessagesWaiting);
#else /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_8_X || TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */
#define traceCREATE_COUNTING_SEMAPHORE() \
	prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_COUNTING_CREATE, (uint32_t)pxHandle, pxHandle->uxMessagesWaiting);
#endif /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_8_X || TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */

/* Called in xQueueCreateCountingSemaphore, if the creation fails */
#undef traceCREATE_COUNTING_SEMAPHORE_FAILED
#define traceCREATE_COUNTING_SEMAPHORE_FAILED() \
	prvTraceStoreEvent0(PSF_EVENT_SEMAPHORE_COUNTING_CREATE_FAILED);

/* Called in xQueueCreateMutex, and thereby also from xSemaphoreCreateMutex and xSemaphoreCreateRecursiveMutex */
#undef traceCREATE_MUTEX
#define traceCREATE_MUTEX( pxNewQueue ) \
	switch (pxNewQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_MUTEX: \
			prvTraceStoreEvent1(PSF_EVENT_MUTEX_CREATE, (uint32_t)pxNewQueue); \
			break; \
		case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
			prvTraceStoreEvent1(PSF_EVENT_MUTEX_RECURSIVE_CREATE, (uint32_t)pxNewQueue); \
			break; \
	}

/* Called in xQueueCreateMutex when the operation fails (when memory allocation fails) */
#undef traceCREATE_MUTEX_FAILED
#define traceCREATE_MUTEX_FAILED() \
	prvTraceStoreEvent0(PSF_EVENT_MUTEX_CREATE_FAILED);
\r
/* Called when a message is sent to a queue */ /* CS IS NEW ! */
#undef traceQUEUE_SEND
#define traceQUEUE_SEND( pxQueue ) \
	switch (pxQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND : PSF_EVENT_QUEUE_SEND_FRONT, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting + 1); \
			break; \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
		case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
			prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting + 1); \
			break; \
		case queueQUEUE_TYPE_MUTEX: \
		case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
			prvTraceStoreEvent1(PSF_EVENT_MUTEX_GIVE, (uint32_t)pxQueue); \
			break; \
	}

/* Called when a message failed to be sent to a queue (timeout) */
#undef traceQUEUE_SEND_FAILED
#define traceQUEUE_SEND_FAILED( pxQueue ) \
	switch (pxQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_FAILED : PSF_EVENT_QUEUE_SEND_FRONT_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
			break; \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
		case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
			prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
			break; \
		case queueQUEUE_TYPE_MUTEX: \
		case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
			prvTraceStoreEvent1(PSF_EVENT_MUTEX_GIVE_FAILED, (uint32_t)pxQueue); \
			break; \
	}

/* Called when the task is blocked due to a send operation on a full queue */
#undef traceBLOCKING_ON_QUEUE_SEND
#define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \
	switch (pxQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_BLOCK : PSF_EVENT_QUEUE_SEND_FRONT_BLOCK, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
			break; \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
		case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
			prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE_BLOCK, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
			break; \
		case queueQUEUE_TYPE_MUTEX: \
		case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
			prvTraceStoreEvent1(PSF_EVENT_MUTEX_GIVE_BLOCK, (uint32_t)pxQueue); \
			break; \
	}

/* Called for Recursive Mutex */
#undef traceGIVE_MUTEX_RECURSIVE
#define traceGIVE_MUTEX_RECURSIVE( pxMutex ) \
	prvTraceStoreEvent1(PSF_EVENT_MUTEX_GIVE_RECURSIVE, (uint32_t)pxMutex);

/* Called for Recursive Mutex */
#undef traceGIVE_MUTEX_RECURSIVE_FAILED
#define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ) \
	prvTraceStoreEvent1(PSF_EVENT_MUTEX_GIVE_RECURSIVE_FAILED, (uint32_t)pxMutex);
\r
/**************************************************************************/
/* Makes sure xQueueGiveFromISR also has a xCopyPosition parameter */
/**************************************************************************/
/* Helpers needed to correctly expand names */
#define TZ__CAT2(a,b) a ## b
#define TZ__CAT(a,b) TZ__CAT2(a, b)

/* Expands name if this header is included... uxQueueType must be a macro that only exists in queue.c or whatever, and it must expand to nothing or to something that's valid in identifiers */
#define xQueueGiveFromISR(a,b) TZ__CAT(xQueueGiveFromISR__, uxQueueType) (a,b)

/* If in queue.c, the "uxQueueType" macro expands to "pcHead". queueSEND_TO_BACK is the value we need to send in */
#define xQueueGiveFromISR__pcHead(__a, __b) MyWrapper(__a, __b, const BaseType_t xCopyPosition); \
BaseType_t xQueueGiveFromISR(__a, __b) { return MyWrapper(xQueue, pxHigherPriorityTaskWoken, queueSEND_TO_BACK); } \
BaseType_t MyWrapper(__a, __b, const BaseType_t xCopyPosition)

/* If not in queue.c, "uxQueueType" isn't expanded */
#define xQueueGiveFromISR__uxQueueType(__a, __b) xQueueGiveFromISR(__a,__b)

/**************************************************************************/
/* End of xQueueGiveFromISR fix */
/**************************************************************************/
\r
/* Called when a message is sent from interrupt context, e.g., using xQueueSendFromISR */
#undef traceQUEUE_SEND_FROM_ISR
#define traceQUEUE_SEND_FROM_ISR( pxQueue ) \
	switch (pxQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_FROMISR : PSF_EVENT_QUEUE_SEND_FRONT_FROMISR, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting + 1); \
			break; \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
		case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
			prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE_FROMISR, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting + 1); \
			break; \
	}

/* Called when a message send from interrupt context fails (since the queue was full) */
#undef traceQUEUE_SEND_FROM_ISR_FAILED
#define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \
	switch (pxQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_FROMISR_FAILED : PSF_EVENT_QUEUE_SEND_FRONT_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
			break; \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
		case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
			prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
			break; \
	}
\r
/* Called when a message is received from a queue */
#undef traceQUEUE_RECEIVE
#define traceQUEUE_RECEIVE( pxQueue ) \
	switch (pxQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent3(PSF_EVENT_QUEUE_RECEIVE, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \
			break; \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
		case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
			prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_TAKE, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \
			break; \
		case queueQUEUE_TYPE_MUTEX: \
		case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
			prvTraceStoreEvent2(PSF_EVENT_MUTEX_TAKE, (uint32_t)pxQueue, xTicksToWait); \
			break; \
	}

/* Called when a receive operation on a queue fails (timeout) */
#undef traceQUEUE_RECEIVE_FAILED
#define traceQUEUE_RECEIVE_FAILED( pxQueue ) \
	switch (pxQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent3(xJustPeeking == pdFALSE ? PSF_EVENT_QUEUE_RECEIVE_FAILED : PSF_EVENT_QUEUE_PEEK_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
			break; \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
		case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
			prvTraceStoreEvent3(xJustPeeking == pdFALSE ? PSF_EVENT_SEMAPHORE_TAKE_FAILED : PSF_EVENT_SEMAPHORE_PEEK_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
			break; \
		case queueQUEUE_TYPE_MUTEX: \
		case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
			prvTraceStoreEvent2(xJustPeeking == pdFALSE ? PSF_EVENT_MUTEX_TAKE_FAILED : PSF_EVENT_MUTEX_PEEK_FAILED, (uint32_t)pxQueue, xTicksToWait); \
			break; \
	}

/* Called when the task is blocked due to a receive operation on an empty queue */
#undef traceBLOCKING_ON_QUEUE_RECEIVE
#define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \
	switch (pxQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent3(xJustPeeking == pdFALSE ? PSF_EVENT_QUEUE_RECEIVE_BLOCK : PSF_EVENT_QUEUE_PEEK_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
			break; \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
		case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
			prvTraceStoreEvent3(xJustPeeking == pdFALSE ? PSF_EVENT_SEMAPHORE_TAKE_BLOCK : PSF_EVENT_SEMAPHORE_PEEK_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
			break; \
		case queueQUEUE_TYPE_MUTEX: \
		case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
			prvTraceStoreEvent2(xJustPeeking == pdFALSE ? PSF_EVENT_MUTEX_TAKE_BLOCK : PSF_EVENT_MUTEX_PEEK_BLOCK, (uint32_t)pxQueue, xTicksToWait); \
			break; \
	}
\r
/* Called on xSemaphoreTakeRecursive - block-time variable name differs by FreeRTOS version */
#undef traceTAKE_MUTEX_RECURSIVE
#if TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_8_X || TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X
#define traceTAKE_MUTEX_RECURSIVE( pxQueue ) \
	prvTraceStoreEvent2(PSF_EVENT_MUTEX_TAKE_RECURSIVE, (uint32_t)pxQueue, xTicksToWait);
#else /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_8_X || TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */
#define traceTAKE_MUTEX_RECURSIVE( pxQueue ) \
	prvTraceStoreEvent2(PSF_EVENT_MUTEX_TAKE_RECURSIVE, (uint32_t)pxQueue, xBlockTime);
#endif /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_8_X || TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */

/* Called on xSemaphoreTakeRecursive, if it fails (timeout) */
#undef traceTAKE_MUTEX_RECURSIVE_FAILED
#if TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_8_X || TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X
#define traceTAKE_MUTEX_RECURSIVE_FAILED( pxQueue ) \
	prvTraceStoreEvent2(PSF_EVENT_MUTEX_TAKE_RECURSIVE_FAILED, (uint32_t)pxQueue, xTicksToWait);
#else /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_8_X || TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */
#define traceTAKE_MUTEX_RECURSIVE_FAILED( pxQueue ) \
	prvTraceStoreEvent2(PSF_EVENT_MUTEX_TAKE_RECURSIVE_FAILED, (uint32_t)pxQueue, xBlockTime);
#endif /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_8_X || TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */
\r
/* Called when a message is received in interrupt context, e.g., using xQueueReceiveFromISR */
#undef traceQUEUE_RECEIVE_FROM_ISR
#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \
	switch (pxQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent2(PSF_EVENT_QUEUE_RECEIVE_FROMISR, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting - 1); \
			break; \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
		case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
			prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_TAKE_FROMISR, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting - 1); \
			break; \
	}

/* Called when a message receive from interrupt context fails (since the queue was empty) */
#undef traceQUEUE_RECEIVE_FROM_ISR_FAILED
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \
	switch (pxQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent2(PSF_EVENT_QUEUE_RECEIVE_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
			break; \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
		case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
			prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_TAKE_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
			break; \
	}

/* Called on xQueuePeek */
#undef traceQUEUE_PEEK
#define traceQUEUE_PEEK( pxQueue ) \
	switch (pxQueue->ucQueueType) \
	{ \
		case queueQUEUE_TYPE_BASE: \
			prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
			break; \
		case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
		case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
			prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_PEEK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
			break; \
		case queueQUEUE_TYPE_MUTEX: \
		case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
			prvTraceStoreEvent1(PSF_EVENT_MUTEX_PEEK, (uint32_t)pxQueue); \
			break; \
	}
\r
/* Called in vTaskPrioritySet - also updates the recorder's stored priority for the task */
#undef traceTASK_PRIORITY_SET
#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \
	prvTraceSaveObjectData(pxTask, uxNewPriority); \
	prvTraceStoreEvent2(PSF_EVENT_TASK_PRIORITY, (uint32_t)pxTask, uxNewPriority);

/* Called in vTaskPriorityInherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_INHERIT
#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \
	prvTraceStoreEvent2(PSF_EVENT_TASK_PRIO_INHERIT, (uint32_t)pxTask, uxNewPriority);

/* Called in vTaskPriorityDisinherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_DISINHERIT
#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \
	prvTraceStoreEvent2(PSF_EVENT_TASK_PRIO_DISINHERIT, (uint32_t)pxTask, uxNewPriority);

/* Called in vTaskResume */
#undef traceTASK_RESUME
#define traceTASK_RESUME( pxTaskToResume ) \
	prvTraceStoreEvent1(PSF_EVENT_TASK_RESUME, (uint32_t)pxTaskToResume);

/* Called in vTaskResumeFromISR */
#undef traceTASK_RESUME_FROM_ISR
#define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \
	prvTraceStoreEvent1(PSF_EVENT_TASK_RESUME_FROMISR, (uint32_t)pxTaskToResume);

/* Called in pvPortMalloc */
#undef traceMALLOC
#define traceMALLOC( pvAddress, uiSize ) \
	prvTraceStoreEvent2(PSF_EVENT_MALLOC, (uint32_t)pvAddress, uiSize);

/* Called in vPortFree - the size is negated to indicate a release of memory */
#undef traceFREE
#define traceFREE( pvAddress, uiSize ) \
	prvTraceStoreEvent2(PSF_EVENT_FREE, (uint32_t)pvAddress, (uint32_t)(-uiSize));
\r
/* Called in timer.c - xTimerCreate */
#undef traceTIMER_CREATE
#define traceTIMER_CREATE(tmr) \
	prvTraceSaveSymbol(tmr, tmr->pcTimerName); \
	prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, tmr->pcTimerName, tmr); \
	prvTraceStoreEvent2(PSF_EVENT_TIMER_CREATE, (uint32_t)tmr, tmr->xTimerPeriodInTicks);

/* Called in xTimerCreate, if the timer creation fails */
#undef traceTIMER_CREATE_FAILED
#define traceTIMER_CREATE_FAILED() \
	prvTraceStoreEvent0(PSF_EVENT_TIMER_CREATE_FAILED);
\r
#if (TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_8_X || TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X)
/* Extra switch cases for timer commands that only exist in FreeRTOS v8.0 and later;
 * expanded inside traceTIMER_COMMAND_SEND below. */
#define traceTIMER_COMMAND_SEND_8_0_CASES(tmr) \
	case tmrCOMMAND_RESET: \
		prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_RESET : PSF_EVENT_TIMER_RESET_FAILED, (uint32_t)tmr, xOptionalValue); \
		break; \
	case tmrCOMMAND_START_FROM_ISR: \
		prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_START_FROMISR : PSF_EVENT_TIMER_START_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
		break; \
	case tmrCOMMAND_RESET_FROM_ISR: \
		prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_RESET_FROMISR : PSF_EVENT_TIMER_RESET_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
		break; \
	case tmrCOMMAND_STOP_FROM_ISR: \
		prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_STOP_FROMISR : PSF_EVENT_TIMER_STOP_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
		break; \
	case tmrCOMMAND_CHANGE_PERIOD_FROM_ISR: \
		prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR : PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
		break;
#else /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_8_X || TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */
#define traceTIMER_COMMAND_SEND_8_0_CASES(tmr)
#endif /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_8_X || TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */

/* Note that xCommandID can never be tmrCOMMAND_EXECUTE_CALLBACK (-1) since the trace macro is not called in that case */
#undef traceTIMER_COMMAND_SEND
#define traceTIMER_COMMAND_SEND(tmr, xCommandID, xOptionalValue, xReturn) \
	switch(xCommandID) \
	{ \
		case tmrCOMMAND_START: \
			prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_START : PSF_EVENT_TIMER_START_FAILED, (uint32_t)tmr); \
			break; \
		case tmrCOMMAND_STOP: \
			prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_STOP : PSF_EVENT_TIMER_STOP_FAILED, (uint32_t)tmr); \
			break; \
		case tmrCOMMAND_CHANGE_PERIOD: \
			prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_CHANGEPERIOD : PSF_EVENT_TIMER_CHANGEPERIOD_FAILED, (uint32_t)tmr, xOptionalValue); \
			break; \
		case tmrCOMMAND_DELETE: \
			prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_DELETE : PSF_EVENT_TIMER_DELETE_FAILED, (uint32_t)tmr); \
			break; \
		traceTIMER_COMMAND_SEND_8_0_CASES(tmr) \
	}
\r
/* Called in xTimerPendFunctionCall */
#undef tracePEND_FUNC_CALL
#define tracePEND_FUNC_CALL(func, arg1, arg2, ret) \
	prvTraceStoreEvent1((ret == pdPASS) ? PSF_EVENT_TIMER_PENDFUNCCALL : PSF_EVENT_TIMER_PENDFUNCCALL_FAILED, (uint32_t)func);

/* Called in xTimerPendFunctionCallFromISR */
#undef tracePEND_FUNC_CALL_FROM_ISR
#define tracePEND_FUNC_CALL_FROM_ISR(func, arg1, arg2, ret) \
	prvTraceStoreEvent1((ret == pdPASS) ? PSF_EVENT_TIMER_PENDFUNCCALL_FROMISR : PSF_EVENT_TIMER_PENDFUNCCALL_FROMISR_FAILED, (uint32_t)func);
\r
/* Called in xEventGroupCreate */
#undef traceEVENT_GROUP_CREATE
#define traceEVENT_GROUP_CREATE(eg) \
	prvTraceStoreEvent1(PSF_EVENT_EVENTGROUP_CREATE, (uint32_t)eg);

/* Called in vEventGroupDelete - also drops the recorder's symbol record */
#undef traceEVENT_GROUP_DELETE
#define traceEVENT_GROUP_DELETE(eg) \
	prvTraceStoreEvent1(PSF_EVENT_EVENTGROUP_DELETE, (uint32_t)eg); \
	prvTraceDeleteSymbol(eg);

/* Called in xEventGroupCreate, if the allocation fails */
#undef traceEVENT_GROUP_CREATE_FAILED
#define traceEVENT_GROUP_CREATE_FAILED() \
	prvTraceStoreEvent0(PSF_EVENT_EVENTGROUP_CREATE_FAILED);

/* Called in xEventGroupSync, when the task blocks */
#undef traceEVENT_GROUP_SYNC_BLOCK
#define traceEVENT_GROUP_SYNC_BLOCK(eg, bitsToSet, bitsToWaitFor) \
	prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SYNC_BLOCK, (uint32_t)eg, bitsToWaitFor);

/* Called in xEventGroupSync, when the sync completes or times out */
#undef traceEVENT_GROUP_SYNC_END
#define traceEVENT_GROUP_SYNC_END(eg, bitsToSet, bitsToWaitFor, wasTimeout) \
	prvTraceStoreEvent2((wasTimeout != pdTRUE) ? PSF_EVENT_EVENTGROUP_SYNC : PSF_EVENT_EVENTGROUP_SYNC_FAILED, (uint32_t)eg, bitsToWaitFor);

/* Called in xEventGroupWaitBits, when the task blocks */
#undef traceEVENT_GROUP_WAIT_BITS_BLOCK
#define traceEVENT_GROUP_WAIT_BITS_BLOCK(eg, bitsToWaitFor) \
	prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_WAITBITS_BLOCK, (uint32_t)eg, bitsToWaitFor);

/* Called in xEventGroupWaitBits, when the wait completes or times out */
#undef traceEVENT_GROUP_WAIT_BITS_END
#define traceEVENT_GROUP_WAIT_BITS_END(eg, bitsToWaitFor, wasTimeout) \
	prvTraceStoreEvent2((wasTimeout != pdTRUE) ? PSF_EVENT_EVENTGROUP_WAITBITS : PSF_EVENT_EVENTGROUP_WAITBITS_FAILED, (uint32_t)eg, bitsToWaitFor);

/* Called in xEventGroupClearBits */
#undef traceEVENT_GROUP_CLEAR_BITS
#define traceEVENT_GROUP_CLEAR_BITS(eg, bitsToClear) \
	prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_CLEARBITS, (uint32_t)eg, bitsToClear);

/* Called in xEventGroupClearBitsFromISR */
#undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(eg, bitsToClear) \
	prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_CLEARBITS_FROMISR, (uint32_t)eg, bitsToClear);

/* Called in xEventGroupSetBits */
#undef traceEVENT_GROUP_SET_BITS
#define traceEVENT_GROUP_SET_BITS(eg, bitsToSet) \
	prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SETBITS, (uint32_t)eg, bitsToSet);

/* Called in xEventGroupSetBitsFromISR */
#undef traceEVENT_GROUP_SET_BITS_FROM_ISR
#define traceEVENT_GROUP_SET_BITS_FROM_ISR(eg, bitsToSet) \
	prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SETBITS_FROMISR, (uint32_t)eg, bitsToSet);
\r
#undef traceTASK_NOTIFY_TAKE

/* Trace hook for ulTaskNotifyTake(): logs a successful "take" when the task
 * had actually been notified, and a failed one otherwise.
 * NOTE(review): the "else" continuation lines were missing in this copy, so
 * the FAILED event was stored on every call (and BOTH events on success).
 * Restored so exactly one event is stored per invocation, matching the
 * upstream recorder sources. */
#if (TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X)
#define traceTASK_NOTIFY_TAKE() \
	if (pxCurrentTCB->ucNotifyState == taskNOTIFICATION_RECEIVED) \
		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE, (uint32_t)pxCurrentTCB, xTicksToWait); \
	else \
		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE_FAILED, (uint32_t)pxCurrentTCB, xTicksToWait);
#else /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */
/* Pre-v9 kernels expose the notification state as eNotifyState/eNotified. */
#define traceTASK_NOTIFY_TAKE() \
	if (pxCurrentTCB->eNotifyState == eNotified) \
		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE, (uint32_t)pxCurrentTCB, xTicksToWait); \
	else \
		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE_FAILED, (uint32_t)pxCurrentTCB, xTicksToWait);
#endif /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */
\r
#undef traceTASK_NOTIFY_TAKE_BLOCK

/* Trace hook: the calling task is about to block in ulTaskNotifyTake();
 * logs the task handle and the tick timeout. */
#define traceTASK_NOTIFY_TAKE_BLOCK() \
	prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE_BLOCK, (uint32_t)pxCurrentTCB, xTicksToWait);
\r
#undef traceTASK_NOTIFY_WAIT

/* Trace hook for xTaskNotifyWait(): logs a successful wait when the task had
 * actually been notified, and a failed one otherwise.
 * NOTE(review): the "else" continuation lines were missing in this copy, so
 * the FAILED event was stored on every call (and BOTH events on success).
 * Restored so exactly one event is stored per invocation, matching the
 * upstream recorder sources. */
#if (TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X)
#define traceTASK_NOTIFY_WAIT() \
	if (pxCurrentTCB->ucNotifyState == taskNOTIFICATION_RECEIVED) \
		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT, (uint32_t)pxCurrentTCB, xTicksToWait); \
	else \
		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT_FAILED, (uint32_t)pxCurrentTCB, xTicksToWait);
#else /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */
/* Pre-v9 kernels expose the notification state as eNotifyState/eNotified. */
#define traceTASK_NOTIFY_WAIT() \
	if (pxCurrentTCB->eNotifyState == eNotified) \
		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT, (uint32_t)pxCurrentTCB, xTicksToWait); \
	else \
		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT_FAILED, (uint32_t)pxCurrentTCB, xTicksToWait);
#endif /* TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X */
\r
#undef traceTASK_NOTIFY_WAIT_BLOCK

/* Trace hook: the calling task is about to block in xTaskNotifyWait();
 * logs the task handle and the tick timeout. */
#define traceTASK_NOTIFY_WAIT_BLOCK() \
	prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT_BLOCK, (uint32_t)pxCurrentTCB, xTicksToWait);
\r
#undef traceTASK_NOTIFY

/* Trace hook: xTaskNotify() from task context; logs the notified task. */
#define traceTASK_NOTIFY() \
	prvTraceStoreEvent1(PSF_EVENT_TASK_NOTIFY, (uint32_t)xTaskToNotify);
\r
#undef traceTASK_NOTIFY_FROM_ISR

/* Trace hook: xTaskNotifyFromISR() — ISR-context variant. */
#define traceTASK_NOTIFY_FROM_ISR() \
	prvTraceStoreEvent1(PSF_EVENT_TASK_NOTIFY_FROM_ISR, (uint32_t)xTaskToNotify);
\r
#undef traceTASK_NOTIFY_GIVE_FROM_ISR

/* Trace hook: vTaskNotifyGiveFromISR() — ISR-context variant. */
#define traceTASK_NOTIFY_GIVE_FROM_ISR() \
	prvTraceStoreEvent1(PSF_EVENT_TASK_NOTIFY_GIVE_FROM_ISR, (uint32_t)xTaskToNotify);
\r
#undef traceQUEUE_REGISTRY_ADD

/* Trace hook: vQueueAddToRegistry(). Stores the object's name in the symbol
 * table and emits an object-name event so the viewer can label the handle.
 * Wrapped in do { } while (0) so the two statements expand as one statement:
 * without it, an unbraced "if (x) traceQUEUE_REGISTRY_ADD(q, n);" at a call
 * site would guard only the first statement (classic multi-statement macro
 * hazard). The trailing semicolon at the call site completes the loop. */
#define traceQUEUE_REGISTRY_ADD(object, name) \
	do { \
		prvTraceSaveSymbol(object, (const char*)name); \
		prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, name, object); \
	} while (0)
\r
1911 #endif /*#if TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING */
\r
1913 #else /*(TRC_USE_TRACEALYZER_RECORDER == 1)*/
\r
/* Recorder disabled (TRC_USE_TRACEALYZER_RECORDER == 0): the public naming
 * and exclusion API is defined away to empty macros, so application calls
 * compile to nothing and call sites need no conditional compilation. */
#define vTraceSetQueueName(object, name)
#define vTraceSetSemaphoreName(object, name)
#define vTraceSetMutexName(object, name)
#define vTraceExcludeQueue(handle)
#define vTraceExcludeSemaphore(handle)
#define vTraceExcludeMutex(handle)
#define vTraceExcludeTimer(handle)
#define vTraceExcludeEventGroup(handle)
#define vTraceExcludeDelays()
\r
1927 #endif /*(TRC_USE_TRACEALYZER_RECORDER == 1)*/
\r
1929 #ifdef __cplusplus
\r
1933 #endif /* TRC_KERNEL_PORT_H */
\r