/*******************************************************************************
 * Trace Recorder Library for Tracealyzer v4.1.5
 * Percepio AB, www.percepio.com
 *
 * This file is part of the trace recorder library (RECORDER), which is the
 * intellectual property of Percepio AB (PERCEPIO) and provided under a
 * license as follows.
 *
 * The RECORDER may be used free of charge for the purpose of recording data
 * intended for analysis in PERCEPIO products. It may not be used or modified
 * for other purposes without explicit permission from PERCEPIO.
 * You may distribute the RECORDER in its original source code form, assuming
 * this text (terms of use, disclaimer, copyright notice) is unchanged. You are
 * allowed to distribute the RECORDER with minor modifications intended for
 * configuration or porting of the RECORDER, e.g., to allow using it on a
 * specific processor, processor family or with a specific communication
 * interface. Any such modifications should be documented directly below
 * this comment block.
 *
 * The RECORDER is being delivered to you AS IS and PERCEPIO makes no warranty
 * as to its use or performance. PERCEPIO does not and cannot warrant the
 * performance or results you may obtain by using the RECORDER or documentation.
 * PERCEPIO make no warranties, express or implied, as to noninfringement of
 * third party rights, merchantability, or fitness for any particular purpose.
 * In no event will PERCEPIO, its technology partners, or distributors be liable
 * to you for any consequential, incidental or special damages, including any
 * lost profits or lost savings, even if a representative of PERCEPIO has been
 * advised of the possibility of such damages, or for any claim by any third
 * party. Some jurisdictions do not allow the exclusion or limitation of
 * incidental, consequential or special damages, or the exclusion of implied
 * warranties or limitations on how long an implied warranty may last, so the
 * above limitations may not apply to you.
 *
 * FreeRTOS-specific definitions needed by the trace recorder
 *
 * Tabs are used for indent in this file (1 tab = 4 spaces)
 *
 * Copyright Percepio AB, 2018.
 ******************************************************************************/
\r
#ifndef TRC_KERNEL_PORT_H
#define TRC_KERNEL_PORT_H

#include "FreeRTOS.h" /* Defines configUSE_TRACE_FACILITY */
#include "trcPortDefines.h"

#define TRC_USE_TRACEALYZER_RECORDER configUSE_TRACE_FACILITY
\r
/*** FreeRTOS version codes **************************************************/
#define FREERTOS_VERSION_NOT_SET 0
#define TRC_FREERTOS_VERSION_7_3 1 /* v7.3 is earliest supported. */
#define TRC_FREERTOS_VERSION_7_4 2
#define TRC_FREERTOS_VERSION_7_5_OR_7_6 3
#define TRC_FREERTOS_VERSION_8_X 4 /* Any v8.x.x */
#define TRC_FREERTOS_VERSION_9_0_0 5
#define TRC_FREERTOS_VERSION_9_0_1 6
#define TRC_FREERTOS_VERSION_9_0_2 7
#define TRC_FREERTOS_VERSION_10_0_0 8 /* If using FreeRTOS v10.0.0 or later version */

#define TRC_FREERTOS_VERSION_9_X 42 /* Not allowed anymore */
\r
#if (TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_X)
/* This setting for TRC_CFG_FREERTOS_VERSION is no longer allowed, as v9.0.1 needs special handling. */
#error "Please specify your exact FreeRTOS version in trcConfig.h, from the options listed above."
#endif
\r
#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0)
#define prvGetStreamBufferType(x) ((( StreamBuffer_t * )x )->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER)
#else
#define prvGetStreamBufferType(x) 0
#endif
\r
/* Added mainly for our internal testing. This makes it easier to create test applications that
run on multiple FreeRTOS versions. */
#if (TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_8_X)
	/* FreeRTOS v7.x */
	#define STRING_CAST(x) ( (signed char*) x )
	#define TickType portTickType
#else
	/* FreeRTOS v8.0 and later */
	#define STRING_CAST(x) x
	#define TickType TickType_t
#endif
\r
#if (defined(TRC_USE_TRACEALYZER_RECORDER)) && (TRC_USE_TRACEALYZER_RECORDER == 1)

/*******************************************************************************
 * INCLUDE_xTaskGetCurrentTaskHandle must be set to 1 for tracing to work properly
 ******************************************************************************/
#undef INCLUDE_xTaskGetCurrentTaskHandle
#define INCLUDE_xTaskGetCurrentTaskHandle 1
\r
#if (TRC_CFG_SCHEDULING_ONLY == 0)

/*******************************************************************************
 * vTraceSetQueueName(void* object, const char* name)
 *
 * Parameter object: pointer to the Queue that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for Queue objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetQueueName(void* object, const char* name);

/*******************************************************************************
 * vTraceSetSemaphoreName(void* object, const char* name)
 *
 * Parameter object: pointer to the Semaphore that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for Semaphore objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetSemaphoreName(void* object, const char* name);
\r
/*******************************************************************************
 * vTraceSetMutexName(void* object, const char* name)
 *
 * Parameter object: pointer to the Mutex that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for Mutex objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetMutexName(void* object, const char* name);
\r
#if (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1)
/*******************************************************************************
 * vTraceSetEventGroupName(void* object, const char* name)
 *
 * Parameter object: pointer to the EventGroup that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for EventGroup objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetEventGroupName(void* object, const char* name);
#else /* (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1) */
#define vTraceSetEventGroupName(object, name) /* Do nothing */
#endif /* (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1) */

#if (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1)
/*******************************************************************************
 * vTraceSetStreamBufferName(void* object, const char* name)
 *
 * Parameter object: pointer to the StreamBuffer that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for StreamBuffer objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetStreamBufferName(void* object, const char* name);
#else /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1) */
#define vTraceSetStreamBufferName(object, name) /* Do nothing */
#endif /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1) */

#if (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1)
/*******************************************************************************
 * vTraceSetMessageBufferName(void* object, const char* name)
 *
 * Parameter object: pointer to the MessageBuffer that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for MessageBuffer objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetMessageBufferName(void* object, const char* name);
#else /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1) */
#define vTraceSetMessageBufferName(object, name) /* Do nothing */
#endif /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1) */
\r
#else /* (TRC_CFG_SCHEDULING_ONLY == 0) */

#define vTraceSetQueueName(object, name) /* Do nothing */
#define vTraceSetSemaphoreName(object, name) /* Do nothing */
#define vTraceSetMutexName(object, name) /* Do nothing */
#define vTraceSetEventGroupName(object, name) /* Do nothing */
#define vTraceSetStreamBufferName(object, name) /* Do nothing */
#define vTraceSetMessageBufferName(object, name) /* Do nothing */

#endif /* (TRC_CFG_SCHEDULING_ONLY == 0) */
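
/*******************************************************************************
 * Example (illustration only, not part of the recorder): naming kernel objects
 * after creation so they show up with readable names in Tracealyzer. The
 * handles and names below are hypothetical application objects.
 *
 *   QueueHandle_t xSensorQueue = xQueueCreate(10, sizeof(uint32_t));
 *   SemaphoreHandle_t xBusMutex = xSemaphoreCreateMutex();
 *
 *   if (xSensorQueue != NULL)
 *       vTraceSetQueueName((void*)xSensorQueue, "SensorQueue");
 *   if (xBusMutex != NULL)
 *       vTraceSetMutexName((void*)xBusMutex, "BusMutex");
 *
 * Note that the name argument is expected to be a constant string literal,
 * as stated above.
 ******************************************************************************/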
\r
/*******************************************************************************
 * Note: Setting names for event groups is difficult to support, so this has
 * been excluded intentionally. This is because we don't know if event_groups.c
 * is included in the build, so referencing it from the recorder may cause errors.
 ******************************************************************************/

/* Gives the currently executing task (wrapper for RTOS-specific function) */
void* prvTraceGetCurrentTaskHandle(void);
\r
#if (((TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT) && (TRC_CFG_INCLUDE_ISR_TRACING == 1)) || (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING))

/* Tells if the scheduler is currently suspended (task-switches can't occur) */
unsigned char prvTraceIsSchedulerSuspended(void);

/*******************************************************************************
 * INCLUDE_xTaskGetSchedulerState must be set to 1 for tracing to work properly
 ******************************************************************************/
#undef INCLUDE_xTaskGetSchedulerState
#define INCLUDE_xTaskGetSchedulerState 1

#endif /* (((TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT) && (TRC_CFG_INCLUDE_ISR_TRACING == 1)) || (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING)) */
\r
#define TRACE_KERNEL_VERSION 0x1AA1
#define TRACE_TICK_RATE_HZ configTICK_RATE_HZ /* Defined in "FreeRTOS.h" */
#define TRACE_CPU_CLOCK_HZ configCPU_CLOCK_HZ /* Defined in "FreeRTOSConfig.h" */
#define TRACE_GET_CURRENT_TASK() prvTraceGetCurrentTaskHandle()

#define TRACE_GET_OS_TICKS() (uiTraceTickCount) /* Streaming only */

/* If using dynamic allocation of snapshot trace buffer... */
#define TRACE_MALLOC(size) pvPortMalloc(size)
\r
#if defined(configUSE_TIMERS)
#if (configUSE_TIMERS == 1)
#undef INCLUDE_xTimerGetTimerDaemonTaskHandle
#define INCLUDE_xTimerGetTimerDaemonTaskHandle 1
#endif /* configUSE_TIMERS == 1 */
#endif /* configUSE_TIMERS */
\r
/* For ARM Cortex-M devices - assumes the ARM CMSIS API is available */
#if (defined (__CORTEX_M))
	#define TRACE_ALLOC_CRITICAL_SECTION() uint32_t __irq_status;
	#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = __get_PRIMASK(); __set_PRIMASK(1);} /* PRIMASK disables ALL interrupts - allows for tracing in any ISR */
	#define TRACE_EXIT_CRITICAL_SECTION() {__set_PRIMASK(__irq_status);}
#endif
\r
#if ((TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_ARM_CORTEX_A9) || (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_Renesas_RX600) || (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_MICROCHIP_PIC24_PIC32) || (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_Altera_NiosII))
	#define TRACE_ALLOC_CRITICAL_SECTION() int __irq_status;
	#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = portSET_INTERRUPT_MASK_FROM_ISR();}
	#define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
#endif
\r
#if (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_Win32)
	/* In the Win32 port, there are no real interrupts, so we can use the normal critical sections */
	#define TRACE_ALLOC_CRITICAL_SECTION()
	#define TRACE_ENTER_CRITICAL_SECTION() portENTER_CRITICAL()
	#define TRACE_EXIT_CRITICAL_SECTION() portEXIT_CRITICAL()
#endif
\r
#if (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_POWERPC_Z4)
	#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X)
		/* FreeRTOS v8.0 or later */
		#define TRACE_ALLOC_CRITICAL_SECTION() UBaseType_t __irq_status;
		#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = portSET_INTERRUPT_MASK_FROM_ISR();}
		#define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
	#else
		/* FreeRTOS v7.x */
		#define TRACE_ALLOC_CRITICAL_SECTION() unsigned portBASE_TYPE __irq_status;
		#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = portSET_INTERRUPT_MASK_FROM_ISR();}
		#define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
	#endif
#endif
\r
#ifndef TRACE_ENTER_CRITICAL_SECTION
	#error "This hardware port has no definition for critical sections! See http://percepio.com/2014/10/27/how-to-define-critical-sections-for-the-recorder/"
#endif
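
/*******************************************************************************
 * Example (illustration only): how the three critical section macros above are
 * intended to compose. A trace macro that must run atomically first allocates
 * the status variable, then brackets the recorder call with enter/exit, as the
 * trace macros further down in this file do (e.g., traceTASK_DELETE).
 *
 *   {
 *       TRACE_ALLOC_CRITICAL_SECTION();
 *       TRACE_ENTER_CRITICAL_SECTION();
 *       // recorder call that must not be interrupted goes here
 *       TRACE_EXIT_CRITICAL_SECTION();
 *   }
 ******************************************************************************/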
\r
#if (TRC_CFG_FREERTOS_VERSION == TRC_FREERTOS_VERSION_9_0_1)
/******************************************************************************
 * Fix for FreeRTOS v9.0.1 to correctly identify xQueuePeek events.
 *
 * In FreeRTOS v9.0.1, the below trace hooks are incorrectly used from three
 * different functions, since the earlier function xQueueGenericReceive
 * has been replaced by xQueuePeek, xQueueSemaphoreTake and xQueueReceive.
 *
 * xQueueGenericReceive had a parameter "xJustPeeking", used by the trace hooks
 * to tell xQueuePeek events apart from others. This is no longer present, so
 * we need another way to correctly identify peek events. Since all three
 * functions call the same trace macros, the context of these macros is unknown.
 *
 * We therefore check the __LINE__ macro inside of the trace macros. This gives
 * the line number of queue.c where the macros are used, which can be used to
 * tell if the context is xQueuePeek or another function.
 * __LINE__ is a standard compiler feature since ancient times, so it should
 * work on all common compilers.
 *
 * This might seem like a quite brittle and unusual solution, but it works in
 * this particular case and is only used for FreeRTOS v9.0.1.
 * Future versions of FreeRTOS should not need this fix, as we have submitted
 * a correction of queue.c with individual trace macros for each function.
 ******************************************************************************/
#define isQueueReceiveHookActuallyPeek (__LINE__ > 1674) /* Halfway between the closest trace points */
\r
#elif (TRC_CFG_FREERTOS_VERSION <= TRC_FREERTOS_VERSION_9_0_0)
#define isQueueReceiveHookActuallyPeek xJustPeeking

#elif (TRC_CFG_FREERTOS_VERSION > TRC_FREERTOS_VERSION_9_0_1)
#define isQueueReceiveHookActuallyPeek (__LINE__ < 0) /* instead of pdFALSE to fix a warning of "constant condition" */

#endif
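
/*******************************************************************************
 * Illustration (not part of the recorder): the principle behind the v9.0.1
 * workaround above. Because the same trace hook is expanded from several
 * functions in queue.c, the expansion site's __LINE__ value is compared
 * against a threshold to decide which caller it was. A minimal sketch of the
 * idea, with a hypothetical threshold:
 *
 *   #define IS_EXPANDED_IN_FIRST_FUNCTION (__LINE__ < 100)
 *
 *   void first_function(void)  { int early = IS_EXPANDED_IN_FIRST_FUNCTION; }
 *   // ... many lines later ...
 *   void second_function(void) { int early = IS_EXPANDED_IN_FIRST_FUNCTION; }
 *
 * "early" evaluates differently in the two functions, since __LINE__ expands
 * to the line where the macro is used. This only works as long as the line
 * numbers of queue.c do not change, which is why it is limited to v9.0.1.
 ******************************************************************************/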
\r
extern uint16_t CurrentFilterMask;
extern uint16_t CurrentFilterGroup;

uint8_t prvTraceGetQueueType(void* handle);

uint16_t prvTraceGetTaskNumberLow16(void* handle);
uint16_t prvTraceGetTaskNumberHigh16(void* handle);
void prvTraceSetTaskNumberLow16(void* handle, uint16_t value);
void prvTraceSetTaskNumberHigh16(void* handle, uint16_t value);

uint16_t prvTraceGetQueueNumberLow16(void* handle);
uint16_t prvTraceGetQueueNumberHigh16(void* handle);
void prvTraceSetQueueNumberLow16(void* handle, uint16_t value);
void prvTraceSetQueueNumberHigh16(void* handle, uint16_t value);
\r
#if (TRC_CFG_INCLUDE_TIMER_EVENTS == 1 && TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0)
uint16_t prvTraceGetTimerNumberLow16(void* handle);
uint16_t prvTraceGetTimerNumberHigh16(void* handle);
void prvTraceSetTimerNumberLow16(void* handle, uint16_t value);
void prvTraceSetTimerNumberHigh16(void* handle, uint16_t value);
#endif /* (TRC_CFG_INCLUDE_TIMER_EVENTS == 1 && TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0) */

#if (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1 && TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0)
uint16_t prvTraceGetEventGroupNumberLow16(void* handle);
uint16_t prvTraceGetEventGroupNumberHigh16(void* handle);
void prvTraceSetEventGroupNumberLow16(void* handle, uint16_t value);
void prvTraceSetEventGroupNumberHigh16(void* handle, uint16_t value);
#endif /* (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1 && TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0) */

#if (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1 && TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0)
uint16_t prvTraceGetStreamBufferNumberLow16(void* handle);
uint16_t prvTraceGetStreamBufferNumberHigh16(void* handle);
void prvTraceSetStreamBufferNumberLow16(void* handle, uint16_t value);
void prvTraceSetStreamBufferNumberHigh16(void* handle, uint16_t value);
#endif /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1 && TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0) */
\r
#define TRACE_GET_TASK_FILTER(pxTask) prvTraceGetTaskNumberHigh16((void*)pxTask)
#define TRACE_SET_TASK_FILTER(pxTask, group) prvTraceSetTaskNumberHigh16((void*)pxTask, group)

#define TRACE_GET_QUEUE_FILTER(pxObject) prvTraceGetQueueNumberHigh16((void*)pxObject)
#define TRACE_SET_QUEUE_FILTER(pxObject, group) prvTraceSetQueueNumberHigh16((void*)pxObject, group)

#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0)
#define TRACE_GET_EVENTGROUP_FILTER(pxObject) prvTraceGetEventGroupNumberHigh16((void*)pxObject)
#define TRACE_SET_EVENTGROUP_FILTER(pxObject, group) prvTraceSetEventGroupNumberHigh16((void*)pxObject, group)
#else /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0) */
/* FreeRTOS versions before v10.0 do not support filtering for event groups */
#define TRACE_GET_EVENTGROUP_FILTER(pxObject) 1
#define TRACE_SET_EVENTGROUP_FILTER(pxObject, group)
#endif /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0) */

#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0)
#define TRACE_GET_TIMER_FILTER(pxObject) prvTraceGetTimerNumberHigh16((void*)pxObject)
#define TRACE_SET_TIMER_FILTER(pxObject, group) prvTraceSetTimerNumberHigh16((void*)pxObject, group)
#else /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0) */
/* FreeRTOS versions before v10.0 do not support filtering for timers */
#define TRACE_GET_TIMER_FILTER(pxObject) 1
#define TRACE_SET_TIMER_FILTER(pxObject, group)
#endif /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0) */

#define TRACE_GET_STREAMBUFFER_FILTER(pxObject) prvTraceGetStreamBufferNumberHigh16((void*)pxObject)
#define TRACE_SET_STREAMBUFFER_FILTER(pxObject, group) prvTraceSetStreamBufferNumberHigh16((void*)pxObject, group)

/* We can only support filtering if FreeRTOS is at least v7.4 */
#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_7_4)
#define TRACE_GET_OBJECT_FILTER(CLASS, pxObject) TRACE_GET_##CLASS##_FILTER(pxObject)
#define TRACE_SET_OBJECT_FILTER(CLASS, pxObject, group) TRACE_SET_##CLASS##_FILTER(pxObject, group)
#else /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_7_4) */
#define TRACE_GET_OBJECT_FILTER(CLASS, pxObject) 1
#define TRACE_SET_OBJECT_FILTER(CLASS, pxObject, group)
#endif /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_7_4) */
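
/*******************************************************************************
 * Example (illustration only): the generic filter macros use token pasting on
 * the CLASS argument, so a call such as
 *
 *   TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue)
 *
 * expands to
 *
 *   TRACE_GET_QUEUE_FILTER(pxQueue)
 *
 * which in turn resolves to prvTraceGetQueueNumberHigh16((void*)pxQueue).
 * The same pattern is used below for TRACE_GET_OBJECT_NUMBER and
 * TRACE_GET_OBJECT_EVENT_CODE.
 ******************************************************************************/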
\r
/******************************************************************************/
/*** Definitions for Snapshot mode ********************************************/
/******************************************************************************/
#if (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT)

/*** The object classes *******************************************************/

#define TRACE_NCLASSES 9
#define TRACE_CLASS_QUEUE ((traceObjectClass)0)
#define TRACE_CLASS_SEMAPHORE ((traceObjectClass)1)
#define TRACE_CLASS_MUTEX ((traceObjectClass)2)
#define TRACE_CLASS_TASK ((traceObjectClass)3)
#define TRACE_CLASS_ISR ((traceObjectClass)4)
#define TRACE_CLASS_TIMER ((traceObjectClass)5)
#define TRACE_CLASS_EVENTGROUP ((traceObjectClass)6)
#define TRACE_CLASS_STREAMBUFFER ((traceObjectClass)7)
#define TRACE_CLASS_MESSAGEBUFFER ((traceObjectClass)8)
\r
/*** Definitions for Object Table ********************************************/
#define TRACE_KERNEL_OBJECT_COUNT ((TRC_CFG_NQUEUE) + (TRC_CFG_NSEMAPHORE) + (TRC_CFG_NMUTEX) + (TRC_CFG_NTASK) + (TRC_CFG_NISR) + (TRC_CFG_NTIMER) + (TRC_CFG_NEVENTGROUP) + (TRC_CFG_NSTREAMBUFFER) + (TRC_CFG_NMESSAGEBUFFER))

/* Queue properties (except name): current number of messages in queue */
#define PropertyTableSizeQueue ((TRC_CFG_NAME_LEN_QUEUE) + 1)

/* Semaphore properties (except name): state (signaled = 1, cleared = 0) */
#define PropertyTableSizeSemaphore ((TRC_CFG_NAME_LEN_SEMAPHORE) + 1)

/* Mutex properties (except name): owner (task handle, 0 = free) */
#define PropertyTableSizeMutex ((TRC_CFG_NAME_LEN_MUTEX) + 1)

/* Task properties (except name): Byte 0: Current priority
                                  Byte 1: state (if already active)
                                  Byte 2: legacy, not used
                                  Byte 3: legacy, not used */
#define PropertyTableSizeTask ((TRC_CFG_NAME_LEN_TASK) + 4)

/* ISR properties: Byte 0: priority
                   Byte 1: state (if already active) */
#define PropertyTableSizeISR ((TRC_CFG_NAME_LEN_ISR) + 2)

/* TRC_CFG_NTIMER properties: Byte 0: state (unused for now) */
#define PropertyTableSizeTimer ((TRC_CFG_NAME_LEN_TIMER) + 1)

/* TRC_CFG_NEVENTGROUP properties: Byte 0-3: state (unused for now) */
#define PropertyTableSizeEventGroup ((TRC_CFG_NAME_LEN_EVENTGROUP) + 4)

/* TRC_CFG_NSTREAMBUFFER properties: Byte 0-3: state (unused for now) */
#define PropertyTableSizeStreamBuffer ((TRC_CFG_NAME_LEN_STREAMBUFFER) + 4)

/* TRC_CFG_NMESSAGEBUFFER properties: Byte 0-3: state (unused for now) */
#define PropertyTableSizeMessageBuffer ((TRC_CFG_NAME_LEN_MESSAGEBUFFER) + 4)
\r
/* The layout of the byte array representing the Object Property Table */
#define StartIndexQueue (0)
#define StartIndexSemaphore (StartIndexQueue + (TRC_CFG_NQUEUE) * PropertyTableSizeQueue)
#define StartIndexMutex (StartIndexSemaphore + (TRC_CFG_NSEMAPHORE) * PropertyTableSizeSemaphore)
#define StartIndexTask (StartIndexMutex + (TRC_CFG_NMUTEX) * PropertyTableSizeMutex)
#define StartIndexISR (StartIndexTask + (TRC_CFG_NTASK) * PropertyTableSizeTask)
#define StartIndexTimer (StartIndexISR + (TRC_CFG_NISR) * PropertyTableSizeISR)
#define StartIndexEventGroup (StartIndexTimer + (TRC_CFG_NTIMER) * PropertyTableSizeTimer)
#define StartIndexStreamBuffer (StartIndexEventGroup + (TRC_CFG_NEVENTGROUP) * PropertyTableSizeEventGroup)
#define StartIndexMessageBuffer (StartIndexStreamBuffer + (TRC_CFG_NSTREAMBUFFER) * PropertyTableSizeStreamBuffer)

/* Number of bytes used by the object table */
#define TRACE_OBJECT_TABLE_SIZE (StartIndexMessageBuffer + (TRC_CFG_NMESSAGEBUFFER) * PropertyTableSizeMessageBuffer)
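
/*******************************************************************************
 * Worked example (hypothetical configuration values, for illustration only):
 * with TRC_CFG_NQUEUE = 10, TRC_CFG_NAME_LEN_QUEUE = 15 and all other object
 * counts set to 0, the queue section of the Object Property Table occupies
 * 10 * (15 + 1) = 160 bytes, StartIndexSemaphore is 160, and
 * TRACE_OBJECT_TABLE_SIZE is likewise 160 since no other objects follow.
 * Each additional object class adds count * (name length + property bytes)
 * in the same way.
 ******************************************************************************/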
\r
/* Flag to tell the context of tracePEND_FUNC_CALL_FROM_ISR */
extern int uiInEventGroupSetBitsFromISR;

/* Initialization of the object property table */
void vTraceInitObjectPropertyTable(void);

/* Initialization of the handle mechanism, see e.g., prvTraceGetObjectHandle */
void vTraceInitObjectHandleStack(void);

/* Returns the "Not enough handles" error message for the specified object class */
const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);

void* prvTraceGetCurrentTaskHandle(void);
\r
/******************************************************************************
 * TraceQueueClassTable
 * Translates a FreeRTOS QueueType into trace object classes (TRACE_CLASS_).
 * Has one entry for each QueueType, gives TRACE_CLASS ID.
 ******************************************************************************/
extern traceObjectClass TraceQueueClassTable[5];
\r
/*** Event codes for snapshot mode - must match Tracealyzer config files ******/

#define NULL_EVENT (0x00UL)

/*******************************************************************************
 * Miscellaneous events.
 ******************************************************************************/
#define EVENTGROUP_DIV (NULL_EVENT + 1UL) /*0x01*/
#define DIV_XPS (EVENTGROUP_DIV + 0UL) /*0x01*/
#define DIV_TASK_READY (EVENTGROUP_DIV + 1UL) /*0x02*/
#define DIV_NEW_TIME (EVENTGROUP_DIV + 2UL) /*0x03*/

/*******************************************************************************
 * Events for storing task-switches and interrupts. The RESUME events are
 * generated if the task/interrupt is already marked active.
 ******************************************************************************/
#define EVENTGROUP_TS (EVENTGROUP_DIV + 3UL) /*0x04*/
#define TS_ISR_BEGIN (EVENTGROUP_TS + 0UL) /*0x04*/
#define TS_ISR_RESUME (EVENTGROUP_TS + 1UL) /*0x05*/
#define TS_TASK_BEGIN (EVENTGROUP_TS + 2UL) /*0x06*/
#define TS_TASK_RESUME (EVENTGROUP_TS + 3UL) /*0x07*/
\r
/*******************************************************************************
 * EVENTGROUP_OBJCLOSE_NAME
 *
 * About Close Events
 * When an object is evicted from the object property table (object close), two
 * internal events are stored (EVENTGROUP_OBJCLOSE_NAME and
 * EVENTGROUP_OBJCLOSE_PROP), containing the handle-name mapping and object
 * properties valid up to this point.
 ******************************************************************************/
#define EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS (EVENTGROUP_TS + 4UL) /*0x08*/
\r
/*******************************************************************************
 * EVENTGROUP_OBJCLOSE_PROP
 *
 * The internal event carrying properties of deleted objects
 * The handle and object class of the closed object is not stored in this event,
 * but is assumed to be the same as in the preceding CLOSE event. Thus, these
 * two events must be generated from within a critical section.
 * When queues are closed, arg1 is the "state" property (i.e., number of
 * buffered messages/signals).
 * When actors are closed, arg1 is priority, arg2 is handle of the "instance
 * finish" event, and arg3 is event code of the "instance finish" event.
 * In this case, the lower three bits are the object class of the instance finish
 * handle. The lower three bits are not used (always zero) when queues are
 * closed, since the queue type is given in the previous OBJCLOSE_NAME event.
 ******************************************************************************/
#define EVENTGROUP_OBJCLOSE_PROP_TRCSUCCESS (EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS + 8UL) /*0x10*/
\r
/*******************************************************************************
 * EVENTGROUP_CREATE
 *
 * The events in this group are used to log Kernel object creations.
 * The lower three bits in the event code give the object class, i.e., the type
 * of create operation (task, queue, semaphore, etc).
 ******************************************************************************/
#define EVENTGROUP_CREATE_OBJ_TRCSUCCESS (EVENTGROUP_OBJCLOSE_PROP_TRCSUCCESS + 8UL) /*0x18*/
\r
/*******************************************************************************
 * EVENTGROUP_SEND
 *
 * The events in this group are used to log Send/Give events on queues,
 * semaphores and mutexes. The lower three bits in the event code give the
 * object class, i.e., what type of object is operated on (queue, semaphore
 * or mutex).
 ******************************************************************************/
#define EVENTGROUP_SEND_TRCSUCCESS (EVENTGROUP_CREATE_OBJ_TRCSUCCESS + 8UL) /*0x20*/
\r
/*******************************************************************************
 * EVENTGROUP_RECEIVE
 *
 * The events in this group are used to log Receive/Take events on queues,
 * semaphores and mutexes. The lower three bits in the event code give the
 * object class, i.e., what type of object is operated on (queue, semaphore
 * or mutex).
 ******************************************************************************/
#define EVENTGROUP_RECEIVE_TRCSUCCESS (EVENTGROUP_SEND_TRCSUCCESS + 8UL) /*0x28*/
\r
/* Send/Give operations, from ISR */
#define EVENTGROUP_SEND_FROM_ISR_TRCSUCCESS \
	(EVENTGROUP_RECEIVE_TRCSUCCESS + 8UL) /*0x30*/

/* Receive/Take operations, from ISR */
#define EVENTGROUP_RECEIVE_FROM_ISR_TRCSUCCESS \
	(EVENTGROUP_SEND_FROM_ISR_TRCSUCCESS + 8UL) /*0x38*/

/* "Failed" event type versions of above (timeout, failed allocation, etc) */
#define EVENTGROUP_KSE_TRCFAILED \
	(EVENTGROUP_RECEIVE_FROM_ISR_TRCSUCCESS + 8UL) /*0x40*/

/* Failed create calls - memory allocation failed */
#define EVENTGROUP_CREATE_OBJ_TRCFAILED (EVENTGROUP_KSE_TRCFAILED) /*0x40*/

/* Failed send/give - timeout! */
#define EVENTGROUP_SEND_TRCFAILED (EVENTGROUP_CREATE_OBJ_TRCFAILED + 8UL) /*0x48*/

/* Failed receive/take - timeout! */
#define EVENTGROUP_RECEIVE_TRCFAILED (EVENTGROUP_SEND_TRCFAILED + 8UL) /*0x50*/

/* Failed non-blocking send/give - queue full */
#define EVENTGROUP_SEND_FROM_ISR_TRCFAILED (EVENTGROUP_RECEIVE_TRCFAILED + 8UL) /*0x58*/

/* Failed non-blocking receive/take - queue empty */
#define EVENTGROUP_RECEIVE_FROM_ISR_TRCFAILED \
	(EVENTGROUP_SEND_FROM_ISR_TRCFAILED + 8UL) /*0x60*/

/* Events when blocking on receive/take */
#define EVENTGROUP_RECEIVE_TRCBLOCK \
	(EVENTGROUP_RECEIVE_FROM_ISR_TRCFAILED + 8UL) /*0x68*/

/* Events when blocking on send/give */
#define EVENTGROUP_SEND_TRCBLOCK (EVENTGROUP_RECEIVE_TRCBLOCK + 8UL) /*0x70*/

/* Events on queue peek (receive) */
#define EVENTGROUP_PEEK_TRCSUCCESS (EVENTGROUP_SEND_TRCBLOCK + 8UL) /*0x78*/

/* Events on object delete (vTaskDelete or vQueueDelete) */
#define EVENTGROUP_DELETE_OBJ_TRCSUCCESS (EVENTGROUP_PEEK_TRCSUCCESS + 8UL) /*0x80*/

/* Other events - object class is implied: TASK */
#define EVENTGROUP_OTHERS (EVENTGROUP_DELETE_OBJ_TRCSUCCESS + 8UL) /*0x88*/
#define TASK_DELAY_UNTIL (EVENTGROUP_OTHERS + 0UL) /*0x88*/
#define TASK_DELAY (EVENTGROUP_OTHERS + 1UL) /*0x89*/
#define TASK_SUSPEND (EVENTGROUP_OTHERS + 2UL) /*0x8A*/
#define TASK_RESUME (EVENTGROUP_OTHERS + 3UL) /*0x8B*/
#define TASK_RESUME_FROM_ISR (EVENTGROUP_OTHERS + 4UL) /*0x8C*/
#define TASK_PRIORITY_SET (EVENTGROUP_OTHERS + 5UL) /*0x8D*/
#define TASK_PRIORITY_INHERIT (EVENTGROUP_OTHERS + 6UL) /*0x8E*/
#define TASK_PRIORITY_DISINHERIT (EVENTGROUP_OTHERS + 7UL) /*0x8F*/

#define EVENTGROUP_MISC_PLACEHOLDER (EVENTGROUP_OTHERS + 8UL) /*0x90*/
#define PEND_FUNC_CALL (EVENTGROUP_MISC_PLACEHOLDER+0UL) /*0x90*/
#define PEND_FUNC_CALL_FROM_ISR (EVENTGROUP_MISC_PLACEHOLDER+1UL) /*0x91*/
#define PEND_FUNC_CALL_TRCFAILED (EVENTGROUP_MISC_PLACEHOLDER+2UL) /*0x92*/
#define PEND_FUNC_CALL_FROM_ISR_TRCFAILED (EVENTGROUP_MISC_PLACEHOLDER+3UL) /*0x93*/
#define MEM_MALLOC_SIZE (EVENTGROUP_MISC_PLACEHOLDER+4UL) /*0x94*/
#define MEM_MALLOC_ADDR (EVENTGROUP_MISC_PLACEHOLDER+5UL) /*0x95*/
#define MEM_FREE_SIZE (EVENTGROUP_MISC_PLACEHOLDER+6UL) /*0x96*/
#define MEM_FREE_ADDR (EVENTGROUP_MISC_PLACEHOLDER+7UL) /*0x97*/

#define EVENTGROUP_USEREVENT (EVENTGROUP_MISC_PLACEHOLDER + 8UL) /*0x98*/
#define USER_EVENT (EVENTGROUP_USEREVENT + 0UL)

/* Allow for 0-15 arguments (the number of args is added to event code) */
#define USER_EVENT_LAST (EVENTGROUP_USEREVENT + 15UL) /*0xA7*/
\r
/*******************************************************************************
 * XTS Event - eXtended TimeStamp events
 * The timestamps used in the recorder are "differential timestamps" (DTS), i.e.
 * the time since the last stored event. The DTS fields are either 1 or 2 bytes
 * in the other events, depending on the bytes available in the event struct.
 * If the time since the last event (the DTS) is larger than allowed for by
 * the DTS field of the current event, an XTS event is inserted immediately
 * before the original event. The XTS event contains up to 3 additional bytes
 * of the DTS value - the higher bytes of the true DTS value. The lower 1-2
 * bytes are stored in the normal DTS field.
 * There are two types of XTS events, XTS8 and XTS16. An XTS8 event is stored
 * when there is only room for 1 byte (8 bit) DTS data in the original event,
 * which means a limit of 0xFF (255UL). The XTS16 is used when the original event
 * has a 16 bit DTS field and thereby can handle values up to 0xFFFF (65535UL).
 *
 * Using a very high frequency time base can result in many XTS events.
 * Preferably, the time between two OS ticks should fit in 16 bits, i.e.,
 * at most 65535. If your time base has a higher frequency, you can define
 ******************************************************************************/
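
/*******************************************************************************
 * Worked example (illustration only): assume an event with an 8-bit DTS field
 * and a time difference of 0x1234 ticks since the previous event. 0x1234 does
 * not fit in 8 bits, so an XTS8 event carrying the high byte (0x12) is stored
 * first, and the low byte (0x34) goes into the DTS field of the original
 * event. With a 16-bit DTS field the limit is 0xFFFF, and an XTS16 event is
 * used in the same way when that limit is exceeded.
 ******************************************************************************/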
\r
#define EVENTGROUP_SYS (EVENTGROUP_USEREVENT + 16UL) /*0xA8*/
#define XTS8 (EVENTGROUP_SYS + 0UL) /*0xA8*/
#define XTS16 (EVENTGROUP_SYS + 1UL) /*0xA9*/
#define EVENT_BEING_WRITTEN (EVENTGROUP_SYS + 2UL) /*0xAA*/
#define RESERVED_DUMMY_CODE (EVENTGROUP_SYS + 3UL) /*0xAB*/
#define LOW_POWER_BEGIN (EVENTGROUP_SYS + 4UL) /*0xAC*/
#define LOW_POWER_END (EVENTGROUP_SYS + 5UL) /*0xAD*/
#define XID (EVENTGROUP_SYS + 6UL) /*0xAE*/
#define XTS16L (EVENTGROUP_SYS + 7UL) /*0xAF*/

#define EVENTGROUP_TIMER (EVENTGROUP_SYS + 8UL) /*0xB0*/
#define TIMER_CREATE (EVENTGROUP_TIMER + 0UL) /*0xB0*/
#define TIMER_START (EVENTGROUP_TIMER + 1UL) /*0xB1*/
#define TIMER_RST (EVENTGROUP_TIMER + 2UL) /*0xB2*/
#define TIMER_STOP (EVENTGROUP_TIMER + 3UL) /*0xB3*/
#define TIMER_CHANGE_PERIOD (EVENTGROUP_TIMER + 4UL) /*0xB4*/
#define TIMER_DELETE_OBJ (EVENTGROUP_TIMER + 5UL) /*0xB5*/
#define TIMER_START_FROM_ISR (EVENTGROUP_TIMER + 6UL) /*0xB6*/
#define TIMER_RESET_FROM_ISR (EVENTGROUP_TIMER + 7UL) /*0xB7*/
#define TIMER_STOP_FROM_ISR (EVENTGROUP_TIMER + 8UL) /*0xB8*/

#define TIMER_CREATE_TRCFAILED (EVENTGROUP_TIMER + 9UL) /*0xB9*/
#define TIMER_START_TRCFAILED (EVENTGROUP_TIMER + 10UL) /*0xBA*/
#define TIMER_RESET_TRCFAILED (EVENTGROUP_TIMER + 11UL) /*0xBB*/
#define TIMER_STOP_TRCFAILED (EVENTGROUP_TIMER + 12UL) /*0xBC*/
#define TIMER_CHANGE_PERIOD_TRCFAILED (EVENTGROUP_TIMER + 13UL) /*0xBD*/
#define TIMER_DELETE_TRCFAILED (EVENTGROUP_TIMER + 14UL) /*0xBE*/
#define TIMER_START_FROM_ISR_TRCFAILED (EVENTGROUP_TIMER + 15UL) /*0xBF*/
#define TIMER_RESET_FROM_ISR_TRCFAILED (EVENTGROUP_TIMER + 16UL) /*0xC0*/
#define TIMER_STOP_FROM_ISR_TRCFAILED (EVENTGROUP_TIMER + 17UL) /*0xC1*/
\r
#define EVENTGROUP_EG (EVENTGROUP_TIMER + 18UL) /*0xC2*/
#define EVENT_GROUP_CREATE (EVENTGROUP_EG + 0UL) /*0xC2*/
#define EVENT_GROUP_CREATE_TRCFAILED (EVENTGROUP_EG + 1UL) /*0xC3*/
#define EVENT_GROUP_SYNC_TRCBLOCK (EVENTGROUP_EG + 2UL) /*0xC4*/
#define EVENT_GROUP_SYNC_END (EVENTGROUP_EG + 3UL) /*0xC5*/
#define EVENT_GROUP_WAIT_BITS_TRCBLOCK (EVENTGROUP_EG + 4UL) /*0xC6*/
#define EVENT_GROUP_WAIT_BITS_END (EVENTGROUP_EG + 5UL) /*0xC7*/
#define EVENT_GROUP_CLEAR_BITS (EVENTGROUP_EG + 6UL) /*0xC8*/
#define EVENT_GROUP_CLEAR_BITS_FROM_ISR (EVENTGROUP_EG + 7UL) /*0xC9*/
#define EVENT_GROUP_SET_BITS (EVENTGROUP_EG + 8UL) /*0xCA*/
#define EVENT_GROUP_DELETE_OBJ (EVENTGROUP_EG + 9UL) /*0xCB*/
#define EVENT_GROUP_SYNC_END_TRCFAILED (EVENTGROUP_EG + 10UL) /*0xCC*/
#define EVENT_GROUP_WAIT_BITS_END_TRCFAILED (EVENTGROUP_EG + 11UL) /*0xCD*/
#define EVENT_GROUP_SET_BITS_FROM_ISR (EVENTGROUP_EG + 12UL) /*0xCE*/
#define EVENT_GROUP_SET_BITS_FROM_ISR_TRCFAILED (EVENTGROUP_EG + 13UL) /*0xCF*/

#define TASK_INSTANCE_FINISHED_NEXT_KSE (EVENTGROUP_EG + 14UL) /*0xD0*/
#define TASK_INSTANCE_FINISHED_DIRECT (EVENTGROUP_EG + 15UL) /*0xD1*/

#define TRACE_TASK_NOTIFY_GROUP (EVENTGROUP_EG + 16UL) /*0xD2*/
#define TRACE_TASK_NOTIFY (TRACE_TASK_NOTIFY_GROUP + 0UL) /*0xD2*/
#define TRACE_TASK_NOTIFY_TAKE (TRACE_TASK_NOTIFY_GROUP + 1UL) /*0xD3*/
#define TRACE_TASK_NOTIFY_TAKE_TRCBLOCK (TRACE_TASK_NOTIFY_GROUP + 2UL) /*0xD4*/
#define TRACE_TASK_NOTIFY_TAKE_TRCFAILED (TRACE_TASK_NOTIFY_GROUP + 3UL) /*0xD5*/
#define TRACE_TASK_NOTIFY_WAIT (TRACE_TASK_NOTIFY_GROUP + 4UL) /*0xD6*/
#define TRACE_TASK_NOTIFY_WAIT_TRCBLOCK (TRACE_TASK_NOTIFY_GROUP + 5UL) /*0xD7*/
#define TRACE_TASK_NOTIFY_WAIT_TRCFAILED (TRACE_TASK_NOTIFY_GROUP + 6UL) /*0xD8*/
#define TRACE_TASK_NOTIFY_FROM_ISR (TRACE_TASK_NOTIFY_GROUP + 7UL) /*0xD9*/
#define TRACE_TASK_NOTIFY_GIVE_FROM_ISR (TRACE_TASK_NOTIFY_GROUP + 8UL) /*0xDA*/

#define TIMER_EXPIRED (TRACE_TASK_NOTIFY_GROUP + 9UL) /* 0xDB */
\r
/* Events on queue peek (receive) */
#define EVENTGROUP_PEEK_TRCBLOCK (TRACE_TASK_NOTIFY_GROUP + 10UL) /*0xDC*/
/* peek block on queue: 0xDC */
/* peek block on semaphore: 0xDD */
/* peek block on mutex: 0xDE */

/* Events on queue peek (receive) */
#define EVENTGROUP_PEEK_TRCFAILED (EVENTGROUP_PEEK_TRCBLOCK + 3UL) /*0xDF*/
/* peek failed on queue: 0xDF */
/* peek failed on semaphore: 0xE0 */
/* peek failed on mutex: 0xE1 */
\r
#define EVENTGROUP_STREAMBUFFER_DIV (EVENTGROUP_PEEK_TRCFAILED + 3UL) /*0xE2*/
#define TRACE_STREAMBUFFER_RESET (EVENTGROUP_STREAMBUFFER_DIV + 0) /*0xE2*/
#define TRACE_MESSAGEBUFFER_RESET (EVENTGROUP_STREAMBUFFER_DIV + 1UL) /*0xE3*/
#define TRACE_STREAMBUFFER_OBJCLOSE_NAME_TRCSUCCESS (EVENTGROUP_STREAMBUFFER_DIV + 2UL) /*0xE4*/
#define TRACE_MESSAGEBUFFER_OBJCLOSE_NAME_TRCSUCCESS (EVENTGROUP_STREAMBUFFER_DIV + 3UL) /*0xE5*/
#define TRACE_STREAMBUFFER_OBJCLOSE_PROP_TRCSUCCESS (EVENTGROUP_STREAMBUFFER_DIV + 4UL) /*0xE6*/
#define TRACE_MESSAGEBUFFER_OBJCLOSE_PROP_TRCSUCCESS (EVENTGROUP_STREAMBUFFER_DIV + 5UL) /*0xE7*/
\r
/* The following are using previously "lost" event codes */
#define TRACE_STREAMBUFFER_CREATE_OBJ_TRCSUCCESS (EVENTGROUP_CREATE_OBJ_TRCSUCCESS + 4UL) /*0x1C*/
#define TRACE_STREAMBUFFER_CREATE_OBJ_TRCFAILED (EVENTGROUP_CREATE_OBJ_TRCFAILED + 4UL) /*0x44*/
#define TRACE_STREAMBUFFER_DELETE_OBJ_TRCSUCCESS (EVENTGROUP_DELETE_OBJ_TRCSUCCESS + 4UL) /*0x84*/
#define TRACE_STREAMBUFFER_SEND_TRCSUCCESS (EVENTGROUP_SEND_TRCSUCCESS + 3UL) /*0x23*/
#define TRACE_STREAMBUFFER_SEND_TRCBLOCK (EVENTGROUP_SEND_TRCBLOCK + 3UL) /*0x73*/
#define TRACE_STREAMBUFFER_SEND_TRCFAILED (EVENTGROUP_SEND_TRCFAILED + 3UL) /*0x4B*/
#define TRACE_STREAMBUFFER_RECEIVE_TRCSUCCESS (EVENTGROUP_RECEIVE_TRCSUCCESS + 3UL) /*0x2B*/
#define TRACE_STREAMBUFFER_RECEIVE_TRCBLOCK (EVENTGROUP_RECEIVE_TRCBLOCK + 3UL) /*0x6B*/
#define TRACE_STREAMBUFFER_RECEIVE_TRCFAILED (EVENTGROUP_RECEIVE_TRCFAILED + 3UL) /*0x53*/
#define TRACE_STREAMBUFFER_SEND_FROM_ISR_TRCSUCCESS (EVENTGROUP_SEND_FROM_ISR_TRCSUCCESS + 3UL) /*0x33*/
#define TRACE_STREAMBUFFER_SEND_FROM_ISR_TRCFAILED (EVENTGROUP_SEND_FROM_ISR_TRCFAILED + 3UL) /*0x5B*/
#define TRACE_STREAMBUFFER_RECEIVE_FROM_ISR_TRCSUCCESS (EVENTGROUP_RECEIVE_FROM_ISR_TRCSUCCESS + 3UL) /*0x3B*/
#define TRACE_STREAMBUFFER_RECEIVE_FROM_ISR_TRCFAILED (EVENTGROUP_RECEIVE_FROM_ISR_TRCFAILED + 3UL) /*0x63*/
\r
/* The following are using previously "lost" event codes. These macros aren't even directly referenced, instead we do (equivalent STREAMBUFFER code) + 1. */
#define TRACE_MESSAGEBUFFER_CREATE_OBJ_TRCSUCCESS (EVENTGROUP_CREATE_OBJ_TRCSUCCESS + 5UL) /*0x1D*/
#define TRACE_MESSAGEBUFFER_CREATE_OBJ_TRCFAILED (EVENTGROUP_CREATE_OBJ_TRCFAILED + 5UL) /*0x45*/
#define TRACE_MESSAGEBUFFER_DELETE_OBJ_TRCSUCCESS (EVENTGROUP_DELETE_OBJ_TRCSUCCESS + 5UL) /*0x85*/
#define TRACE_MESSAGEBUFFER_SEND_TRCSUCCESS (EVENTGROUP_SEND_TRCSUCCESS + 4UL) /*0x24*/
#define TRACE_MESSAGEBUFFER_SEND_TRCBLOCK (EVENTGROUP_SEND_TRCBLOCK + 4UL) /*0x74*/
#define TRACE_MESSAGEBUFFER_SEND_TRCFAILED (EVENTGROUP_SEND_TRCFAILED + 4UL) /*0x4C*/
#define TRACE_MESSAGEBUFFER_RECEIVE_TRCSUCCESS (EVENTGROUP_RECEIVE_TRCSUCCESS + 4UL) /*0x2C*/
#define TRACE_MESSAGEBUFFER_RECEIVE_TRCBLOCK (EVENTGROUP_RECEIVE_TRCBLOCK + 4UL) /*0x6C*/
#define TRACE_MESSAGEBUFFER_RECEIVE_TRCFAILED (EVENTGROUP_RECEIVE_TRCFAILED + 4UL) /*0x54*/
#define TRACE_MESSAGEBUFFER_SEND_FROM_ISR_TRCSUCCESS (EVENTGROUP_SEND_FROM_ISR_TRCSUCCESS + 4UL) /*0x34*/
#define TRACE_MESSAGEBUFFER_SEND_FROM_ISR_TRCFAILED (EVENTGROUP_SEND_FROM_ISR_TRCFAILED + 4UL) /*0x5C*/
#define TRACE_MESSAGEBUFFER_RECEIVE_FROM_ISR_TRCSUCCESS (EVENTGROUP_RECEIVE_FROM_ISR_TRCSUCCESS + 4UL) /*0x3C*/
#define TRACE_MESSAGEBUFFER_RECEIVE_FROM_ISR_TRCFAILED (EVENTGROUP_RECEIVE_FROM_ISR_TRCFAILED + 4UL) /*0x64*/

/* LAST EVENT (0xE7) */
\r
/****************************
 * MACROS TO GET TRACE CLASS *
 ****************************/
#define TRACE_GET_TRACE_CLASS_FROM_TASK_CLASS(kernelClass) (TRACE_CLASS_TASK)
#define TRACE_GET_TRACE_CLASS_FROM_TASK_OBJECT(pxObject) (TRACE_CLASS_TASK)

#define TRACE_GET_TRACE_CLASS_FROM_QUEUE_CLASS(kernelClass) TraceQueueClassTable[kernelClass]
#define TRACE_GET_TRACE_CLASS_FROM_QUEUE_OBJECT(pxObject) TRACE_GET_TRACE_CLASS_FROM_QUEUE_CLASS(prvTraceGetQueueType(pxObject))

#define TRACE_GET_TRACE_CLASS_FROM_TIMER_CLASS(kernelClass) (TRACE_CLASS_TIMER)
#define TRACE_GET_TRACE_CLASS_FROM_TIMER_OBJECT(pxObject) (TRACE_CLASS_TIMER)

#define TRACE_GET_TRACE_CLASS_FROM_EVENTGROUP_CLASS(kernelClass) (TRACE_CLASS_EVENTGROUP)
#define TRACE_GET_TRACE_CLASS_FROM_EVENTGROUP_OBJECT(pxObject) (TRACE_CLASS_EVENTGROUP)

/* TRACE_GET_TRACE_CLASS_FROM_STREAMBUFFER_CLASS can only be accessed with a parameter indicating if it is a MessageBuffer */
#define TRACE_GET_TRACE_CLASS_FROM_STREAMBUFFER_CLASS(xIsMessageBuffer) (xIsMessageBuffer == 1 ? TRACE_CLASS_MESSAGEBUFFER : TRACE_CLASS_STREAMBUFFER)
#define TRACE_GET_TRACE_CLASS_FROM_STREAMBUFFER_OBJECT(pxObject) (prvGetStreamBufferType(pxObject) == 1 ? TRACE_CLASS_MESSAGEBUFFER : TRACE_CLASS_STREAMBUFFER)

/* Generic versions */
#define TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass) TRACE_GET_TRACE_CLASS_FROM_##CLASS##_CLASS(kernelClass)
#define TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject) TRACE_GET_TRACE_CLASS_FROM_##CLASS##_OBJECT(pxObject)
\r
/******************************
 * MACROS TO GET OBJECT NUMBER *
 ******************************/
#define TRACE_GET_TASK_NUMBER(pxTCB) (traceHandle)(prvTraceGetTaskNumberLow16(pxTCB))
#define TRACE_SET_TASK_NUMBER(pxTCB) prvTraceSetTaskNumberLow16(pxTCB, prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(TASK, pxTCB)));

#define TRACE_GET_QUEUE_NUMBER(queue) ( ( traceHandle ) prvTraceGetQueueNumberLow16(queue) )
#define TRACE_SET_QUEUE_NUMBER(queue) prvTraceSetQueueNumberLow16(queue, (uint16_t)prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, queue)));

#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0)
#define TRACE_GET_TIMER_NUMBER(tmr) ( ( traceHandle ) prvTraceGetTimerNumberLow16(tmr) )
#define TRACE_SET_TIMER_NUMBER(tmr) prvTraceSetTimerNumberLow16(tmr, (uint16_t)prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(TIMER, tmr)));
#else /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0) */
#define TRACE_GET_TIMER_NUMBER(tmr) ( ( traceHandle ) ((Timer_t*)tmr)->uxTimerNumber )
#define TRACE_SET_TIMER_NUMBER(tmr) ((Timer_t*)tmr)->uxTimerNumber = prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(TIMER, tmr));
#endif /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0) */

#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0)
#define TRACE_GET_EVENTGROUP_NUMBER(eg) ( ( traceHandle ) prvTraceGetEventGroupNumberLow16(eg) )
#define TRACE_SET_EVENTGROUP_NUMBER(eg) prvTraceSetEventGroupNumberLow16(eg, (uint16_t)prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(EVENTGROUP, eg)));
#else /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0) */
#define TRACE_GET_EVENTGROUP_NUMBER(eg) ( ( traceHandle ) uxEventGroupGetNumber(eg) )
#define TRACE_SET_EVENTGROUP_NUMBER(eg) ((EventGroup_t*)eg)->uxEventGroupNumber = prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(EVENTGROUP, eg));
#endif /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_10_0_0) */

#define TRACE_GET_STREAMBUFFER_NUMBER(sb) ( ( traceHandle ) prvTraceGetStreamBufferNumberLow16(sb) )
#define TRACE_SET_STREAMBUFFER_NUMBER(sb) prvTraceSetStreamBufferNumberLow16(sb, (uint16_t)prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(STREAMBUFFER, sb)));

/* Generic versions */
#define TRACE_GET_OBJECT_NUMBER(CLASS, pxObject) TRACE_GET_##CLASS##_NUMBER(pxObject)
#define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) TRACE_SET_##CLASS##_NUMBER(pxObject)
\r
/******************************
 * MACROS TO GET EVENT CODES *
 ******************************/
#define TRACE_GET_TASK_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(TASK, kernelClass))
#define TRACE_GET_QUEUE_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(QUEUE, kernelClass))
#define TRACE_GET_TIMER_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass) -- THIS IS NOT USED --
#define TRACE_GET_EVENTGROUP_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass) -- THIS IS NOT USED --
#define TRACE_GET_STREAMBUFFER_CLASS_EVENT_CODE(SERVICE, RESULT, isMessageBuffer) (uint8_t)(TRACE_STREAMBUFFER_##SERVICE##_##RESULT + (uint8_t)isMessageBuffer)

#define TRACE_GET_TASK_OBJECT_EVENT_CODE(SERVICE, RESULT, pxTCB) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_CLASS_TASK)
#define TRACE_GET_QUEUE_OBJECT_EVENT_CODE(SERVICE, RESULT, pxObject) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, pxObject))
#define TRACE_GET_TIMER_OBJECT_EVENT_CODE(SERVICE, RESULT, UNUSED) -- THIS IS NOT USED --
#define TRACE_GET_EVENTGROUP_OBJECT_EVENT_CODE(SERVICE, RESULT, UNUSED) -- THIS IS NOT USED --
#define TRACE_GET_STREAMBUFFER_OBJECT_EVENT_CODE(SERVICE, RESULT, pxObject) (uint8_t)(TRACE_STREAMBUFFER_##SERVICE##_##RESULT + prvGetStreamBufferType(pxObject))

/* Generic versions */
#define TRACE_GET_CLASS_EVENT_CODE(SERVICE, RESULT, CLASS, kernelClass) TRACE_GET_##CLASS##_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass)
#define TRACE_GET_OBJECT_EVENT_CODE(SERVICE, RESULT, CLASS, pxObject) TRACE_GET_##CLASS##_OBJECT_EVENT_CODE(SERVICE, RESULT, pxObject)
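
/*******************************************************************************
 * Example (illustration only): how an event code is composed. A successful
 * send on a mutex expands roughly as
 *
 *   TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCSUCCESS, QUEUE, pxMutex)
 *     -> EVENTGROUP_SEND_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, pxMutex)
 *     -> 0x20 + TRACE_CLASS_MUTEX (2) = 0x22
 *
 * i.e., the event group gives the upper bits and the object class occupies the
 * lower three bits, as described in the event group comments above.
 ******************************************************************************/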
\r
/******************************
 * SPECIAL MACROS FOR TASKS *
 ******************************/
#define TRACE_GET_TASK_PRIORITY(pxTCB) ((uint8_t)pxTCB->uxPriority)
#define TRACE_GET_TASK_NAME(pxTCB) ((char*)pxTCB->pcTaskName)

/*** The trace macros for snapshot mode **************************************/

/* A macro that will update the tick count when returning from tickless idle */
#undef traceINCREASE_TICK_COUNT
#define traceINCREASE_TICK_COUNT( xCount )

/* Called for each task that becomes ready */
#undef traceMOVED_TASK_TO_READY_STATE
#define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \
	trcKERNEL_HOOKS_MOVED_TASK_TO_READY_STATE(pxTCB);

/* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. */
#undef traceTASK_INCREMENT_TICK
\r
#if (TRC_CFG_FREERTOS_VERSION <= TRC_FREERTOS_VERSION_7_4)

#define traceTASK_INCREMENT_TICK( xTickCount ) \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxMissedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); }

#else

#define traceTASK_INCREMENT_TICK( xTickCount ) \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || xPendedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
	if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); }

#endif
\r
/* Called on each task-switch */
#undef traceTASK_SWITCHED_IN
#define traceTASK_SWITCHED_IN() \
	trcKERNEL_HOOKS_TASK_SWITCH(TRACE_GET_CURRENT_TASK());
\r
/* Called on vTaskCreate */
#undef traceTASK_CREATE
#define traceTASK_CREATE(pxNewTCB) \
	if (pxNewTCB != NULL) \
	{ \
		trcKERNEL_HOOKS_TASK_CREATE(TRACE_GET_OBJECT_EVENT_CODE(CREATE_OBJ, TRCSUCCESS, TASK, pxNewTCB), TASK, pxNewTCB); \
	}

/* Called in vTaskCreate, if it fails (typically if the stack cannot be allocated) */
#undef traceTASK_CREATE_FAILED
#define traceTASK_CREATE_FAILED() \
	trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_NUMERIC_PARAM_ONLY(TRACE_GET_CLASS_EVENT_CODE(CREATE_OBJ, TRCFAILED, TASK, NOT_USED), 0);
\r
/* Called on vTaskDelete */
#undef traceTASK_DELETE
#define traceTASK_DELETE( pxTaskToDelete ) \
	{ TRACE_ALLOC_CRITICAL_SECTION(); \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_TASK_DELETE(TRACE_GET_OBJECT_EVENT_CODE(DELETE_OBJ, TRCSUCCESS, TASK, pxTaskToDelete), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_NAME, TRCSUCCESS, TASK, pxTaskToDelete), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_PROP, TRCSUCCESS, TASK, pxTaskToDelete), pxTaskToDelete); \
	TRACE_EXIT_CRITICAL_SECTION(); }
\r
#if (TRC_CFG_SCHEDULING_ONLY == 0)

#if defined(configUSE_TICKLESS_IDLE)
#if (configUSE_TICKLESS_IDLE != 0)

#undef traceLOW_POWER_IDLE_BEGIN
#define traceLOW_POWER_IDLE_BEGIN() \
	{ \
		extern uint32_t trace_disable_timestamp; \
		prvTraceStoreLowPower(0); \
		trace_disable_timestamp = 1; \
	}

#undef traceLOW_POWER_IDLE_END
#define traceLOW_POWER_IDLE_END() \
	{ \
		extern uint32_t trace_disable_timestamp; \
		trace_disable_timestamp = 0; \
		prvTraceStoreLowPower(1); \
	}

#endif /* (configUSE_TICKLESS_IDLE != 0) */
#endif /* defined(configUSE_TICKLESS_IDLE) */
\r
/* Called on vTaskSuspend */
#undef traceTASK_SUSPEND
#define traceTASK_SUSPEND( pxTaskToSuspend ) \
	trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend);

/* Called from special case with timer only */
#undef traceTASK_DELAY_SUSPEND
#define traceTASK_DELAY_SUSPEND( pxTaskToSuspend ) \
	trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

/* Called on vTaskDelay - note the use of FreeRTOS variable xTicksToDelay */
#undef traceTASK_DELAY
#define traceTASK_DELAY() \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY, pxCurrentTCB, xTicksToDelay); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();
\r
/* Called on vTaskDelayUntil - note the use of FreeRTOS variable xTimeToWake */
#undef traceTASK_DELAY_UNTIL
#if TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0
#define traceTASK_DELAY_UNTIL(xTimeToWake) \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY_UNTIL, pxCurrentTCB, xTimeToWake); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();
#else /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0 */
#define traceTASK_DELAY_UNTIL() \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY_UNTIL, pxCurrentTCB, xTimeToWake); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();
#endif /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0 */
\r
/* Called in xQueueCreate, and thereby for all other objects based on queues, such as semaphores. */
#undef traceQUEUE_CREATE
#define traceQUEUE_CREATE( pxNewQueue ) \
	trcKERNEL_HOOKS_OBJECT_CREATE(TRACE_GET_OBJECT_EVENT_CODE(CREATE_OBJ, TRCSUCCESS, QUEUE, pxNewQueue), QUEUE, pxNewQueue);
\r
/* Called in xQueueCreate, if the queue creation fails */
#undef traceQUEUE_CREATE_FAILED
#define traceQUEUE_CREATE_FAILED( queueType ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_NUMERIC_PARAM_ONLY(TRACE_GET_CLASS_EVENT_CODE(CREATE_OBJ, TRCFAILED, QUEUE, queueType), 0);

/* Called on vQueueDelete */
#undef traceQUEUE_DELETE
#define traceQUEUE_DELETE( pxQueue ) \
	{ TRACE_ALLOC_CRITICAL_SECTION(); \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_OBJECT_DELETE(TRACE_GET_OBJECT_EVENT_CODE(DELETE_OBJ, TRCSUCCESS, QUEUE, pxQueue), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_NAME, TRCSUCCESS, QUEUE, pxQueue), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_PROP, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue); \
	TRACE_EXIT_CRITICAL_SECTION(); }

/* This macro is not necessary as of FreeRTOS v9.0.0 */
#if (TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_0_0)
/* Called in xQueueCreateMutex, and thereby also from xSemaphoreCreateMutex and xSemaphoreCreateRecursiveMutex */
#undef traceCREATE_MUTEX
#define traceCREATE_MUTEX( pxNewQueue ) \
	trcKERNEL_HOOKS_OBJECT_CREATE(TRACE_GET_OBJECT_EVENT_CODE(CREATE_OBJ, TRCSUCCESS, QUEUE, pxNewQueue), QUEUE, pxNewQueue);

/* Called in xQueueCreateMutex when the operation fails (when memory allocation fails) */
#undef traceCREATE_MUTEX_FAILED
#define traceCREATE_MUTEX_FAILED() \
	trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_NUMERIC_PARAM_ONLY(TRACE_GET_CLASS_EVENT_CODE(CREATE_OBJ, TRCFAILED, QUEUE, queueQUEUE_TYPE_MUTEX), 0);
#endif /* (TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_0_0) */
\r
/* Called when the Mutex can not be given, since the calling task is not the holder */
#undef traceGIVE_MUTEX_RECURSIVE_FAILED
#define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCFAILED, QUEUE, pxMutex), QUEUE, pxMutex);
\r
/* Called when a message is sent to a queue */ /* CS IS NEW ! */
#undef traceQUEUE_SEND
#define traceQUEUE_SEND( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(QUEUE, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, pxQueue) == TRACE_CLASS_MUTEX ? (uint8_t)0 : (uint8_t)(pxQueue->uxMessagesWaiting + 1));

/* Called when a message failed to be sent to a queue (timeout) */
#undef traceQUEUE_SEND_FAILED
#define traceQUEUE_SEND_FAILED( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCFAILED, QUEUE, pxQueue), QUEUE, pxQueue);

/* Called when the task is blocked due to a send operation on a full queue */
#undef traceBLOCKING_ON_QUEUE_SEND
#define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCBLOCK, QUEUE, pxQueue), QUEUE, pxQueue);
\r
/* Called when a message is received from a queue */
#undef traceQUEUE_RECEIVE
#define traceQUEUE_RECEIVE( pxQueue ) \
	if (isQueueReceiveHookActuallyPeek) \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(PEEK, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue); \
	} \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(QUEUE, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, pxQueue) == TRACE_CLASS_MUTEX ? (uint8_t)TRACE_GET_TASK_NUMBER(TRACE_GET_CURRENT_TASK()) : (uint8_t)(pxQueue->uxMessagesWaiting - 1));
\r
999 /* Called when a receive operation on a queue fails (timeout) */
\r
1000 #undef traceQUEUE_RECEIVE_FAILED
\r
1001 #define traceQUEUE_RECEIVE_FAILED( pxQueue ) \
\r
1002 if (isQueueReceiveHookActuallyPeek) \
\r
1004 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(PEEK, TRCFAILED, QUEUE, pxQueue), QUEUE, pxQueue); \
\r
1008 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCFAILED, QUEUE, pxQueue), QUEUE, pxQueue); \
\r
1011 /* Called when the task is blocked due to a receive operation on an empty queue */
\r
1012 #undef traceBLOCKING_ON_QUEUE_RECEIVE
\r
1013 #define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \
\r
1014 if (isQueueReceiveHookActuallyPeek) \
\r
1016 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(PEEK, TRCBLOCK, QUEUE, pxQueue), QUEUE, pxQueue); \
\r
1020 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCBLOCK, QUEUE, pxQueue), QUEUE, pxQueue); \
\r
1022 if (TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, pxQueue) != TRACE_CLASS_MUTEX) \
\r
1024 trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(); \
\r

/* Called on xQueuePeek */
#undef traceQUEUE_PEEK
#define traceQUEUE_PEEK( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(PEEK, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue);

/* Called on xQueuePeek fail/timeout (added in FreeRTOS v9.0.2) */
#undef traceQUEUE_PEEK_FAILED
#define traceQUEUE_PEEK_FAILED( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(PEEK, TRCFAILED, QUEUE, pxQueue), QUEUE, pxQueue);

/* Called on xQueuePeek blocking (added in FreeRTOS v9.0.2) */
#undef traceBLOCKING_ON_QUEUE_PEEK
#define traceBLOCKING_ON_QUEUE_PEEK( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(PEEK, TRCBLOCK, QUEUE, pxQueue), QUEUE, pxQueue); \
    if (TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, pxQueue) != TRACE_CLASS_MUTEX) \
    { \
        trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(); \
    }

/* Called when a message is sent from interrupt context, e.g., using xQueueSendFromISR */
#undef traceQUEUE_SEND_FROM_ISR
#define traceQUEUE_SEND_FROM_ISR( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(SEND_FROM_ISR, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue); \
    trcKERNEL_HOOKS_SET_OBJECT_STATE(QUEUE, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting + 1));

/* Called when a message send from interrupt context fails (since the queue was full) */
#undef traceQUEUE_SEND_FROM_ISR_FAILED
#define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(SEND_FROM_ISR, TRCFAILED, QUEUE, pxQueue), QUEUE, pxQueue);

/* Called when a message is received in interrupt context, e.g., using xQueueReceiveFromISR */
#undef traceQUEUE_RECEIVE_FROM_ISR
#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE_FROM_ISR, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue); \
    trcKERNEL_HOOKS_SET_OBJECT_STATE(QUEUE, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting - 1));

/* Called when a message receive from interrupt context fails (since the queue was empty) */
#undef traceQUEUE_RECEIVE_FROM_ISR_FAILED
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE_FROM_ISR, TRCFAILED, QUEUE, pxQueue), QUEUE, pxQueue);

#undef traceQUEUE_REGISTRY_ADD
#define traceQUEUE_REGISTRY_ADD(object, name) prvTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, object), TRACE_GET_OBJECT_NUMBER(QUEUE, object), name);
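
/* Illustrative note (not additional recorder code): FreeRTOS invokes this hook
 * from vQueueAddToRegistry when configQUEUE_REGISTRY_SIZE > 0, so an application
 * call such as
 *
 *     vQueueAddToRegistry(xCmdQueue, "CmdQueue");
 *
 * (xCmdQueue and "CmdQueue" being hypothetical application names) also names the
 * object in the trace. */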

/* Called in vTaskPrioritySet */
#undef traceTASK_PRIORITY_SET
#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \
    trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_SET, pxTask, uxNewPriority);

/* Called in vTaskPriorityInherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_INHERIT
#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \
    trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_INHERIT, pxTask, uxNewPriority);

/* Called in vTaskPriorityDisinherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_DISINHERIT
#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \
    trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_DISINHERIT, pxTask, uxNewPriority);

/* Called in vTaskResume */
#undef traceTASK_RESUME
#define traceTASK_RESUME( pxTaskToResume ) \
    trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME, pxTaskToResume);

/* Called in xTaskResumeFromISR */
#undef traceTASK_RESUME_FROM_ISR
#define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \
    trcKERNEL_HOOKS_TASK_RESUME_FROM_ISR(TASK_RESUME_FROM_ISR, pxTaskToResume);

#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X)

#if (TRC_CFG_INCLUDE_MEMMANG_EVENTS == 1)

extern void vTraceStoreMemMangEvent(uint32_t ecode, uint32_t address, int32_t size);

/* MALLOC and FREE are always stored, no matter if they happen inside a filtered task */
#undef traceMALLOC
#define traceMALLOC( pvAddress, uiSize ) \
    if (pvAddress != 0) \
        vTraceStoreMemMangEvent(MEM_MALLOC_SIZE, ( uint32_t ) pvAddress, (int32_t)uiSize);

#undef traceFREE
#define traceFREE( pvAddress, uiSize ) \
    vTraceStoreMemMangEvent(MEM_FREE_SIZE, ( uint32_t ) pvAddress, -((int32_t)uiSize));

#endif /* (TRC_CFG_INCLUDE_MEMMANG_EVENTS == 1) */

#if (TRC_CFG_INCLUDE_TIMER_EVENTS == 1)

/* Called in timers.c - xTimerCreate */
#undef traceTIMER_CREATE
#define traceTIMER_CREATE(tmr) \
    trcKERNEL_HOOKS_OBJECT_CREATE(TIMER_CREATE, TIMER, tmr);

#undef traceTIMER_CREATE_FAILED
#define traceTIMER_CREATE_FAILED() \
    trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_NUMERIC_PARAM_ONLY(TIMER_CREATE_TRCFAILED, 0);

/* Note that xCommandID can never be tmrCOMMAND_EXECUTE_CALLBACK (-1), since the trace macro is not called in that case */
#undef traceTIMER_COMMAND_SEND
#define traceTIMER_COMMAND_SEND(tmr, xCommandID, xOptionalValue, xReturn) \
    if (xCommandID > tmrCOMMAND_START_DONT_TRACE) \
    { \
        if (xCommandID == tmrCOMMAND_CHANGE_PERIOD) \
        { \
            if (xReturn == pdPASS) { \
                trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TIMER_CHANGE_PERIOD, TIMER, tmr, xOptionalValue); \
            } \
            else \
            { \
                trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TIMER_CHANGE_PERIOD_TRCFAILED, TIMER, tmr, xOptionalValue); \
            } \
        } \
        else if ((xCommandID == tmrCOMMAND_DELETE) && (xReturn == pdPASS)) \
        { \
            trcKERNEL_HOOKS_OBJECT_DELETE(TIMER_DELETE_OBJ, EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(TIMER, tmr), EVENTGROUP_OBJCLOSE_PROP_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(TIMER, tmr), TIMER, tmr); \
        } \
        else \
        { \
            trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENTGROUP_TIMER + (uint32_t)xCommandID + ((xReturn == pdPASS) ? 0 : (TIMER_CREATE_TRCFAILED - TIMER_CREATE)), TIMER, tmr, xOptionalValue); \
        } \
    }

#undef traceTIMER_EXPIRED
#define traceTIMER_EXPIRED(tmr) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(TIMER_EXPIRED, TIMER, tmr);

#endif /* (TRC_CFG_INCLUDE_TIMER_EVENTS == 1) */

#if (TRC_CFG_INCLUDE_PEND_FUNC_CALL_EVENTS == 1)

#undef tracePEND_FUNC_CALL
#define tracePEND_FUNC_CALL(func, arg1, arg2, ret) \
    if (ret == pdPASS){ \
        trcKERNEL_HOOKS_KERNEL_SERVICE(PEND_FUNC_CALL, TASK, xTimerGetTimerDaemonTaskHandle() ); \
    } \
    else \
    { \
        trcKERNEL_HOOKS_KERNEL_SERVICE(PEND_FUNC_CALL_TRCFAILED, TASK, xTimerGetTimerDaemonTaskHandle() ); \
    }

#undef tracePEND_FUNC_CALL_FROM_ISR
#define tracePEND_FUNC_CALL_FROM_ISR(func, arg1, arg2, ret) \
    if (! uiInEventGroupSetBitsFromISR) \
        prvTraceStoreKernelCall(PEND_FUNC_CALL_FROM_ISR, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(xTimerGetTimerDaemonTaskHandle()) ); \
    uiInEventGroupSetBitsFromISR = 0;

#endif /* (TRC_CFG_INCLUDE_PEND_FUNC_CALL_EVENTS == 1) */

#endif /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X) */

#if (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1)

#undef traceEVENT_GROUP_CREATE
#define traceEVENT_GROUP_CREATE(eg) \
    trcKERNEL_HOOKS_OBJECT_CREATE(EVENT_GROUP_CREATE, EVENTGROUP, eg);

#undef traceEVENT_GROUP_CREATE_FAILED
#define traceEVENT_GROUP_CREATE_FAILED() \
    trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_NUMERIC_PARAM_ONLY(EVENT_GROUP_CREATE_TRCFAILED, 0);

#undef traceEVENT_GROUP_DELETE
#define traceEVENT_GROUP_DELETE(eg) \
    { TRACE_ALLOC_CRITICAL_SECTION(); \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_OBJECT_DELETE(EVENT_GROUP_DELETE_OBJ, EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(EVENTGROUP, eg), EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(EVENTGROUP, eg), EVENTGROUP, eg); \
    TRACE_EXIT_CRITICAL_SECTION(); }

#undef traceEVENT_GROUP_SYNC_BLOCK
#define traceEVENT_GROUP_SYNC_BLOCK(eg, bitsToSet, bitsToWaitFor) \
    trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_SYNC_TRCBLOCK, EVENTGROUP, eg, bitsToWaitFor);

#undef traceEVENT_GROUP_SYNC_END
#define traceEVENT_GROUP_SYNC_END(eg, bitsToSet, bitsToWaitFor, wasTimeout) \
    if (wasTimeout) \
    { \
        trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_SYNC_END_TRCFAILED, EVENTGROUP, eg, bitsToWaitFor); \
    } \
    else \
    { \
        trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_SYNC_END, EVENTGROUP, eg, bitsToWaitFor); \
    }

#undef traceEVENT_GROUP_WAIT_BITS_BLOCK
#define traceEVENT_GROUP_WAIT_BITS_BLOCK(eg, bitsToWaitFor) \
    trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_WAIT_BITS_TRCBLOCK, EVENTGROUP, eg, bitsToWaitFor); \
    trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

#undef traceEVENT_GROUP_WAIT_BITS_END
#define traceEVENT_GROUP_WAIT_BITS_END(eg, bitsToWaitFor, wasTimeout) \
    if (wasTimeout) \
    { \
        trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_WAIT_BITS_END_TRCFAILED, EVENTGROUP, eg, bitsToWaitFor); \
    } \
    else \
    { \
        trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_WAIT_BITS_END, EVENTGROUP, eg, bitsToWaitFor); \
    }

#undef traceEVENT_GROUP_CLEAR_BITS
#define traceEVENT_GROUP_CLEAR_BITS(eg, bitsToClear) \
    trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_CLEAR_BITS, EVENTGROUP, eg, bitsToClear);

#undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(eg, bitsToClear) \
    trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM_FROM_ISR(EVENT_GROUP_CLEAR_BITS_FROM_ISR, EVENTGROUP, eg, bitsToClear);

#undef traceEVENT_GROUP_SET_BITS
#define traceEVENT_GROUP_SET_BITS(eg, bitsToSet) \
    trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_SET_BITS, EVENTGROUP, eg, bitsToSet);

#undef traceEVENT_GROUP_SET_BITS_FROM_ISR
#define traceEVENT_GROUP_SET_BITS_FROM_ISR(eg, bitsToSet) \
    trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM_FROM_ISR(EVENT_GROUP_SET_BITS_FROM_ISR, EVENTGROUP, eg, bitsToSet); \
    uiInEventGroupSetBitsFromISR = 1;

#endif /* (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1) */

#undef traceTASK_NOTIFY_TAKE
#if (TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_0_0)
#define traceTASK_NOTIFY_TAKE() \
    if (pxCurrentTCB->eNotifyState == eNotified){ \
        trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_TAKE, TASK, pxCurrentTCB, xTicksToWait); \
    } \
    else { \
        trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_TAKE_TRCFAILED, TASK, pxCurrentTCB, xTicksToWait); \
    }
#else /* TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_0_0 */
#define traceTASK_NOTIFY_TAKE() \
    if (pxCurrentTCB->ucNotifyState[ uxIndexToWait ] == taskNOTIFICATION_RECEIVED){ \
        trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_TAKE, TASK, pxCurrentTCB, xTicksToWait); \
    } else { \
        trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_TAKE_TRCFAILED, TASK, pxCurrentTCB, xTicksToWait);}
#endif /* TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_0_0 */

#undef traceTASK_NOTIFY_TAKE_BLOCK
#define traceTASK_NOTIFY_TAKE_BLOCK() \
    trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_TAKE_TRCBLOCK, TASK, pxCurrentTCB, xTicksToWait); \
    trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

#undef traceTASK_NOTIFY_WAIT
#if (TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_0_0)
#define traceTASK_NOTIFY_WAIT() \
    if (TRACE_GET_OBJECT_FILTER(TASK, pxCurrentTCB) & CurrentFilterMask) \
    { \
        if (pxCurrentTCB->eNotifyState == eNotified) \
            prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(pxCurrentTCB), xTicksToWait); \
        else \
            prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_TRCFAILED, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(pxCurrentTCB), xTicksToWait); \
    }
#else /* TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_0_0 */
#define traceTASK_NOTIFY_WAIT() \
    if (TRACE_GET_OBJECT_FILTER(TASK, pxCurrentTCB) & CurrentFilterMask) \
    { \
        if (pxCurrentTCB->ucNotifyState[ uxIndexToWait ] == taskNOTIFICATION_RECEIVED) \
            prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(pxCurrentTCB), xTicksToWait); \
        else \
            prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_TRCFAILED, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(pxCurrentTCB), xTicksToWait); \
    }
#endif /* TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_0_0 */

#undef traceTASK_NOTIFY_WAIT_BLOCK
#define traceTASK_NOTIFY_WAIT_BLOCK() \
    if (TRACE_GET_OBJECT_FILTER(TASK, pxCurrentTCB) & CurrentFilterMask) \
        prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_TRCBLOCK, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(pxCurrentTCB), xTicksToWait); \
    trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

#undef traceTASK_NOTIFY
#define traceTASK_NOTIFY() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \
            prvTraceStoreKernelCall(TRACE_TASK_NOTIFY, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(xTaskToNotify));

#undef traceTASK_NOTIFY_FROM_ISR
#define traceTASK_NOTIFY_FROM_ISR() \
    if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \
        prvTraceStoreKernelCall(TRACE_TASK_NOTIFY_FROM_ISR, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(xTaskToNotify));

#undef traceTASK_NOTIFY_GIVE_FROM_ISR
#define traceTASK_NOTIFY_GIVE_FROM_ISR() \
    if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \
        prvTraceStoreKernelCall(TRACE_TASK_NOTIFY_GIVE_FROM_ISR, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(xTaskToNotify));

#if (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1)

#undef traceSTREAM_BUFFER_CREATE
#define traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer ) \
    trcKERNEL_HOOKS_OBJECT_CREATE(TRACE_GET_OBJECT_EVENT_CODE(CREATE_OBJ, TRCSUCCESS, STREAMBUFFER, pxStreamBuffer), STREAMBUFFER, pxStreamBuffer);

#undef traceSTREAM_BUFFER_CREATE_FAILED
#define traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_NUMERIC_PARAM_ONLY(TRACE_GET_CLASS_EVENT_CODE(CREATE_OBJ, TRCFAILED, STREAMBUFFER, xIsMessageBuffer), 0);

#undef traceSTREAM_BUFFER_CREATE_STATIC_FAILED
#define traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer ) \
    traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer )

#undef traceSTREAM_BUFFER_DELETE
#define traceSTREAM_BUFFER_DELETE( xStreamBuffer ) \
    trcKERNEL_HOOKS_OBJECT_DELETE(TRACE_GET_OBJECT_EVENT_CODE(DELETE_OBJ, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_NAME, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_PROP, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer);

#undef traceSTREAM_BUFFER_RESET
#define traceSTREAM_BUFFER_RESET( xStreamBuffer ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(prvGetStreamBufferType(xStreamBuffer) > 0 ? TRACE_MESSAGEBUFFER_RESET : TRACE_STREAMBUFFER_RESET, STREAMBUFFER, xStreamBuffer); \
    trcKERNEL_HOOKS_SET_OBJECT_STATE(STREAMBUFFER, xStreamBuffer, 0);

#undef traceSTREAM_BUFFER_SEND
#define traceSTREAM_BUFFER_SEND( xStreamBuffer, xReturn ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); \
    trcKERNEL_HOOKS_SET_OBJECT_STATE(STREAMBUFFER, xStreamBuffer, prvBytesInBuffer(xStreamBuffer));

#undef traceBLOCKING_ON_STREAM_BUFFER_SEND
#define traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCBLOCK, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer);

#undef traceSTREAM_BUFFER_SEND_FAILED
#define traceSTREAM_BUFFER_SEND_FAILED( xStreamBuffer ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCFAILED, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer);

#undef traceSTREAM_BUFFER_RECEIVE
#define traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); \
    trcKERNEL_HOOKS_SET_OBJECT_STATE(STREAMBUFFER, xStreamBuffer, prvBytesInBuffer(xStreamBuffer));

#undef traceBLOCKING_ON_STREAM_BUFFER_RECEIVE
#define traceBLOCKING_ON_STREAM_BUFFER_RECEIVE( xStreamBuffer ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCBLOCK, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer);

#undef traceSTREAM_BUFFER_RECEIVE_FAILED
#define traceSTREAM_BUFFER_RECEIVE_FAILED( xStreamBuffer ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCFAILED, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer);

#undef traceSTREAM_BUFFER_SEND_FROM_ISR
#define traceSTREAM_BUFFER_SEND_FROM_ISR( xStreamBuffer, xReturn ) \
    if( xReturn > ( size_t ) 0 ) \
    { \
        trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(SEND_FROM_ISR, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); \
        trcKERNEL_HOOKS_SET_OBJECT_STATE(STREAMBUFFER, xStreamBuffer, prvBytesInBuffer(xStreamBuffer)); \
    } \
    else \
    { \
        trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(SEND_FROM_ISR, TRCFAILED, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); \
    }

#undef traceSTREAM_BUFFER_RECEIVE_FROM_ISR
#define traceSTREAM_BUFFER_RECEIVE_FROM_ISR( xStreamBuffer, xReceivedLength ) \
    if( xReceivedLength > ( size_t ) 0 ) \
    { \
        trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE_FROM_ISR, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); \
        trcKERNEL_HOOKS_SET_OBJECT_STATE(STREAMBUFFER, xStreamBuffer, prvBytesInBuffer(xStreamBuffer)); \
    } \
    else \
    { \
        trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE_FROM_ISR, TRCFAILED, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); \
    }

#endif /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1) */

#endif /* (TRC_CFG_SCHEDULING_ONLY == 0) */

#endif /* TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT */

/******************************************************************************/
/*** Definitions for Streaming mode *******************************************/
/******************************************************************************/
#if (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING)

/*******************************************************************************
* vTraceStoreKernelObjectName
*
* Set the name for a kernel object (defined by its address).
******************************************************************************/
void vTraceStoreKernelObjectName(void* object, const char* name);
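
/* Illustrative usage (a sketch; the handle and name below are hypothetical
 * application identifiers, not part of the recorder):
 *
 *     QueueHandle_t xCmdQueue = xQueueCreate(8, sizeof(uint32_t));
 *     if (xCmdQueue != 0)
 *     {
 *         vTraceStoreKernelObjectName(xCmdQueue, "CmdQueue");
 *     }
 *
 * The object is identified by its address (the handle), so the name is applied
 * to all later events on that object. */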

/*******************************************************************************
* prvIsNewTCB
*
* Tells if this task is already executing, or if there has been a task-switch.
* Assumed to be called within a trace hook in kernel context.
*******************************************************************************/
uint32_t prvIsNewTCB(void* pNewTCB);

#define TRACE_GET_CURRENT_TASK() prvTraceGetCurrentTaskHandle()

/*************************************************************************/
/* KERNEL SPECIFIC OBJECT CONFIGURATION                                  */
/*************************************************************************/

/*******************************************************************************
* The event codes - should match the offline config file.
******************************************************************************/

/*** Event codes for streaming - should match the Tracealyzer config file *****/
#define PSF_EVENT_NULL_EVENT 0x00

#define PSF_EVENT_TRACE_START 0x01
#define PSF_EVENT_TS_CONFIG 0x02
#define PSF_EVENT_OBJ_NAME 0x03
#define PSF_EVENT_TASK_PRIORITY 0x04
#define PSF_EVENT_TASK_PRIO_INHERIT 0x05
#define PSF_EVENT_TASK_PRIO_DISINHERIT 0x06
#define PSF_EVENT_DEFINE_ISR 0x07

#define PSF_EVENT_TASK_CREATE 0x10
#define PSF_EVENT_QUEUE_CREATE 0x11
#define PSF_EVENT_SEMAPHORE_BINARY_CREATE 0x12
#define PSF_EVENT_MUTEX_CREATE 0x13
#define PSF_EVENT_TIMER_CREATE 0x14
#define PSF_EVENT_EVENTGROUP_CREATE 0x15
#define PSF_EVENT_SEMAPHORE_COUNTING_CREATE 0x16
#define PSF_EVENT_MUTEX_RECURSIVE_CREATE 0x17
#define PSF_EVENT_STREAMBUFFER_CREATE 0x18
#define PSF_EVENT_MESSAGEBUFFER_CREATE 0x19

#define PSF_EVENT_TASK_DELETE 0x20
#define PSF_EVENT_QUEUE_DELETE 0x21
#define PSF_EVENT_SEMAPHORE_DELETE 0x22
#define PSF_EVENT_MUTEX_DELETE 0x23
#define PSF_EVENT_TIMER_DELETE 0x24
#define PSF_EVENT_EVENTGROUP_DELETE 0x25
#define PSF_EVENT_STREAMBUFFER_DELETE 0x28
#define PSF_EVENT_MESSAGEBUFFER_DELETE 0x29

#define PSF_EVENT_TASK_READY 0x30
#define PSF_EVENT_NEW_TIME 0x31
#define PSF_EVENT_NEW_TIME_SCHEDULER_SUSPENDED 0x32
#define PSF_EVENT_ISR_BEGIN 0x33
#define PSF_EVENT_ISR_RESUME 0x34
#define PSF_EVENT_TS_BEGIN 0x35
#define PSF_EVENT_TS_RESUME 0x36
#define PSF_EVENT_TASK_ACTIVATE 0x37

#define PSF_EVENT_MALLOC 0x38
#define PSF_EVENT_FREE 0x39

#define PSF_EVENT_LOWPOWER_BEGIN 0x3A
#define PSF_EVENT_LOWPOWER_END 0x3B

#define PSF_EVENT_IFE_NEXT 0x3C
#define PSF_EVENT_IFE_DIRECT 0x3D

#define PSF_EVENT_TASK_CREATE_FAILED 0x40
#define PSF_EVENT_QUEUE_CREATE_FAILED 0x41
#define PSF_EVENT_SEMAPHORE_BINARY_CREATE_FAILED 0x42
#define PSF_EVENT_MUTEX_CREATE_FAILED 0x43
#define PSF_EVENT_TIMER_CREATE_FAILED 0x44
#define PSF_EVENT_EVENTGROUP_CREATE_FAILED 0x45
#define PSF_EVENT_SEMAPHORE_COUNTING_CREATE_FAILED 0x46
#define PSF_EVENT_MUTEX_RECURSIVE_CREATE_FAILED 0x47
#define PSF_EVENT_STREAMBUFFER_CREATE_FAILED 0x49
#define PSF_EVENT_MESSAGEBUFFER_CREATE_FAILED 0x4A

#define PSF_EVENT_TIMER_DELETE_FAILED 0x48

#define PSF_EVENT_QUEUE_SEND 0x50
#define PSF_EVENT_SEMAPHORE_GIVE 0x51
#define PSF_EVENT_MUTEX_GIVE 0x52

#define PSF_EVENT_QUEUE_SEND_FAILED 0x53
#define PSF_EVENT_SEMAPHORE_GIVE_FAILED 0x54
#define PSF_EVENT_MUTEX_GIVE_FAILED 0x55

#define PSF_EVENT_QUEUE_SEND_BLOCK 0x56
#define PSF_EVENT_SEMAPHORE_GIVE_BLOCK 0x57
#define PSF_EVENT_MUTEX_GIVE_BLOCK 0x58

#define PSF_EVENT_QUEUE_SEND_FROMISR 0x59
#define PSF_EVENT_SEMAPHORE_GIVE_FROMISR 0x5A

#define PSF_EVENT_QUEUE_SEND_FROMISR_FAILED 0x5C
#define PSF_EVENT_SEMAPHORE_GIVE_FROMISR_FAILED 0x5D

#define PSF_EVENT_QUEUE_RECEIVE 0x60
#define PSF_EVENT_SEMAPHORE_TAKE 0x61
#define PSF_EVENT_MUTEX_TAKE 0x62

#define PSF_EVENT_QUEUE_RECEIVE_FAILED 0x63
#define PSF_EVENT_SEMAPHORE_TAKE_FAILED 0x64
#define PSF_EVENT_MUTEX_TAKE_FAILED 0x65

#define PSF_EVENT_QUEUE_RECEIVE_BLOCK 0x66
#define PSF_EVENT_SEMAPHORE_TAKE_BLOCK 0x67
#define PSF_EVENT_MUTEX_TAKE_BLOCK 0x68

#define PSF_EVENT_QUEUE_RECEIVE_FROMISR 0x69
#define PSF_EVENT_SEMAPHORE_TAKE_FROMISR 0x6A

#define PSF_EVENT_QUEUE_RECEIVE_FROMISR_FAILED 0x6C
#define PSF_EVENT_SEMAPHORE_TAKE_FROMISR_FAILED 0x6D

#define PSF_EVENT_QUEUE_PEEK 0x70
#define PSF_EVENT_SEMAPHORE_PEEK 0x71
#define PSF_EVENT_MUTEX_PEEK 0x72

#define PSF_EVENT_QUEUE_PEEK_FAILED 0x73
#define PSF_EVENT_SEMAPHORE_PEEK_FAILED 0x74
#define PSF_EVENT_MUTEX_PEEK_FAILED 0x75

#define PSF_EVENT_QUEUE_PEEK_BLOCK 0x76
#define PSF_EVENT_SEMAPHORE_PEEK_BLOCK 0x77
#define PSF_EVENT_MUTEX_PEEK_BLOCK 0x78

#define PSF_EVENT_TASK_DELAY_UNTIL 0x79
#define PSF_EVENT_TASK_DELAY 0x7A
#define PSF_EVENT_TASK_SUSPEND 0x7B
#define PSF_EVENT_TASK_RESUME 0x7C
#define PSF_EVENT_TASK_RESUME_FROMISR 0x7D

#define PSF_EVENT_TIMER_PENDFUNCCALL 0x80
#define PSF_EVENT_TIMER_PENDFUNCCALL_FROMISR 0x81
#define PSF_EVENT_TIMER_PENDFUNCCALL_FAILED 0x82
#define PSF_EVENT_TIMER_PENDFUNCCALL_FROMISR_FAILED 0x83

#define PSF_EVENT_USER_EVENT 0x90

#define PSF_EVENT_TIMER_START 0xA0
#define PSF_EVENT_TIMER_RESET 0xA1
#define PSF_EVENT_TIMER_STOP 0xA2
#define PSF_EVENT_TIMER_CHANGEPERIOD 0xA3
#define PSF_EVENT_TIMER_START_FROMISR 0xA4
#define PSF_EVENT_TIMER_RESET_FROMISR 0xA5
#define PSF_EVENT_TIMER_STOP_FROMISR 0xA6
#define PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR 0xA7
#define PSF_EVENT_TIMER_START_FAILED 0xA8
#define PSF_EVENT_TIMER_RESET_FAILED 0xA9
#define PSF_EVENT_TIMER_STOP_FAILED 0xAA
#define PSF_EVENT_TIMER_CHANGEPERIOD_FAILED 0xAB
#define PSF_EVENT_TIMER_START_FROMISR_FAILED 0xAC
#define PSF_EVENT_TIMER_RESET_FROMISR_FAILED 0xAD
#define PSF_EVENT_TIMER_STOP_FROMISR_FAILED 0xAE
#define PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR_FAILED 0xAF

#define PSF_EVENT_EVENTGROUP_SYNC 0xB0
#define PSF_EVENT_EVENTGROUP_WAITBITS 0xB1
#define PSF_EVENT_EVENTGROUP_CLEARBITS 0xB2
#define PSF_EVENT_EVENTGROUP_CLEARBITS_FROMISR 0xB3
#define PSF_EVENT_EVENTGROUP_SETBITS 0xB4
#define PSF_EVENT_EVENTGROUP_SETBITS_FROMISR 0xB5
#define PSF_EVENT_EVENTGROUP_SYNC_BLOCK 0xB6
#define PSF_EVENT_EVENTGROUP_WAITBITS_BLOCK 0xB7
#define PSF_EVENT_EVENTGROUP_SYNC_FAILED 0xB8
#define PSF_EVENT_EVENTGROUP_WAITBITS_FAILED 0xB9

#define PSF_EVENT_QUEUE_SEND_FRONT 0xC0
#define PSF_EVENT_QUEUE_SEND_FRONT_FAILED 0xC1
#define PSF_EVENT_QUEUE_SEND_FRONT_BLOCK 0xC2
#define PSF_EVENT_QUEUE_SEND_FRONT_FROMISR 0xC3
#define PSF_EVENT_QUEUE_SEND_FRONT_FROMISR_FAILED 0xC4
#define PSF_EVENT_MUTEX_GIVE_RECURSIVE 0xC5
#define PSF_EVENT_MUTEX_GIVE_RECURSIVE_FAILED 0xC6
#define PSF_EVENT_MUTEX_TAKE_RECURSIVE 0xC7
#define PSF_EVENT_MUTEX_TAKE_RECURSIVE_FAILED 0xC8

#define PSF_EVENT_TASK_NOTIFY 0xC9
#define PSF_EVENT_TASK_NOTIFY_TAKE 0xCA
#define PSF_EVENT_TASK_NOTIFY_TAKE_BLOCK 0xCB
#define PSF_EVENT_TASK_NOTIFY_TAKE_FAILED 0xCC
#define PSF_EVENT_TASK_NOTIFY_WAIT 0xCD
#define PSF_EVENT_TASK_NOTIFY_WAIT_BLOCK 0xCE
#define PSF_EVENT_TASK_NOTIFY_WAIT_FAILED 0xCF
#define PSF_EVENT_TASK_NOTIFY_FROM_ISR 0xD0
#define PSF_EVENT_TASK_NOTIFY_GIVE_FROM_ISR 0xD1

#define PSF_EVENT_TIMER_EXPIRED 0xD2

#define PSF_EVENT_STREAMBUFFER_SEND 0xD3
#define PSF_EVENT_STREAMBUFFER_SEND_BLOCK 0xD4
#define PSF_EVENT_STREAMBUFFER_SEND_FAILED 0xD5
#define PSF_EVENT_STREAMBUFFER_RECEIVE 0xD6
#define PSF_EVENT_STREAMBUFFER_RECEIVE_BLOCK 0xD7
#define PSF_EVENT_STREAMBUFFER_RECEIVE_FAILED 0xD8
#define PSF_EVENT_STREAMBUFFER_SEND_FROM_ISR 0xD9
#define PSF_EVENT_STREAMBUFFER_SEND_FROM_ISR_FAILED 0xDA
#define PSF_EVENT_STREAMBUFFER_RECEIVE_FROM_ISR 0xDB
#define PSF_EVENT_STREAMBUFFER_RECEIVE_FROM_ISR_FAILED 0xDC
#define PSF_EVENT_STREAMBUFFER_RESET 0xDD

#define PSF_EVENT_MESSAGEBUFFER_SEND 0xDE
#define PSF_EVENT_MESSAGEBUFFER_SEND_BLOCK 0xDF
#define PSF_EVENT_MESSAGEBUFFER_SEND_FAILED 0xE0
#define PSF_EVENT_MESSAGEBUFFER_RECEIVE 0xE1
#define PSF_EVENT_MESSAGEBUFFER_RECEIVE_BLOCK 0xE2
#define PSF_EVENT_MESSAGEBUFFER_RECEIVE_FAILED 0xE3
#define PSF_EVENT_MESSAGEBUFFER_SEND_FROM_ISR 0xE4
#define PSF_EVENT_MESSAGEBUFFER_SEND_FROM_ISR_FAILED 0xE5
#define PSF_EVENT_MESSAGEBUFFER_RECEIVE_FROM_ISR 0xE6
#define PSF_EVENT_MESSAGEBUFFER_RECEIVE_FROM_ISR_FAILED 0xE7
#define PSF_EVENT_MESSAGEBUFFER_RESET 0xE8

/*** The trace macros for streaming ******************************************/

/* A macro that will update the tick count when returning from tickless idle */
#undef traceINCREASE_TICK_COUNT
/* Note: This can handle time adjustments of max 2^32 ticks, i.e., 35 seconds at 120 MHz. Thus, tick-less idle periods longer than 2^32 ticks will appear "compressed" on the time line. */
#define traceINCREASE_TICK_COUNT( xCount ) { extern uint32_t uiTraceTickCount; uiTraceTickCount += xCount; }
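
/* Worked arithmetic behind the note above (illustrative): 2^32 = 4,294,967,296,
 * and 4,294,967,296 / 120,000,000 per second is roughly 35.8 s, hence the stated
 * "35 seconds at 120 MHz". */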

#if (TRC_CFG_INCLUDE_OSTICK_EVENTS == 1)
#define OS_TICK_EVENT(uxSchedulerSuspended, xTickCount) if (uxSchedulerSuspended == (unsigned portBASE_TYPE) pdFALSE) { prvTraceStoreEvent1(PSF_EVENT_NEW_TIME, (uint32_t)(xTickCount + 1)); }
#else
#define OS_TICK_EVENT(uxSchedulerSuspended, xTickCount)
#endif /* (TRC_CFG_INCLUDE_OSTICK_EVENTS == 1) */

/* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. */
#undef traceTASK_INCREMENT_TICK
#if TRC_CFG_FREERTOS_VERSION <= TRC_FREERTOS_VERSION_7_4
#define traceTASK_INCREMENT_TICK( xTickCount ) \
    if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxMissedTicks == 0) { extern uint32_t uiTraceTickCount; uiTraceTickCount++; } \
    OS_TICK_EVENT(uxSchedulerSuspended, xTickCount)
#else
#define traceTASK_INCREMENT_TICK( xTickCount ) \
    if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxPendedTicks == 0) { extern uint32_t uiTraceTickCount; uiTraceTickCount++; } \
    OS_TICK_EVENT(uxSchedulerSuspended, xTickCount)
#endif /* TRC_CFG_FREERTOS_VERSION <= TRC_FREERTOS_VERSION_7_4 */

/* Called on each task-switch */
#undef traceTASK_SWITCHED_IN
#define traceTASK_SWITCHED_IN() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
    { \
        if (prvIsNewTCB(pxCurrentTCB)) \
        { \
            prvTraceStoreEvent2(PSF_EVENT_TASK_ACTIVATE, (uint32_t)pxCurrentTCB, pxCurrentTCB->uxPriority); \
        } \
    }

/* Called for each task that becomes ready */
#if (TRC_CFG_INCLUDE_READY_EVENTS == 1)
#undef traceMOVED_TASK_TO_READY_STATE
#define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, pxTCB) & CurrentFilterMask) \
        prvTraceStoreEvent1(PSF_EVENT_TASK_READY, (uint32_t)pxTCB);
#endif /* (TRC_CFG_INCLUDE_READY_EVENTS == 1) */

#undef traceTASK_CREATE
#if TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0
#define traceTASK_CREATE(pxNewTCB) \
    if (pxNewTCB != NULL) \
    { \
        prvTraceSaveSymbol(pxNewTCB, pxNewTCB->pcTaskName); \
        prvTraceSaveObjectData(pxNewTCB, pxNewTCB->uxPriority); \
        prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, pxNewTCB->pcTaskName, pxNewTCB); \
        TRACE_SET_OBJECT_FILTER(TASK, pxNewTCB, CurrentFilterGroup); \
        if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
            if (TRACE_GET_OBJECT_FILTER(TASK, pxNewTCB) & CurrentFilterMask) \
                prvTraceStoreEvent2(PSF_EVENT_TASK_CREATE, (uint32_t)pxNewTCB, pxNewTCB->uxPriority); \
    }
#else /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0 */
#define traceTASK_CREATE(pxNewTCB) \
    if (pxNewTCB != NULL) \
    { \
        prvTraceSaveSymbol(pxNewTCB, (const char*)pcName); \
        prvTraceSaveObjectData(pxNewTCB, uxPriority); \
        prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, (const char*)pcName, pxNewTCB); \
        TRACE_SET_OBJECT_FILTER(TASK, pxNewTCB, CurrentFilterGroup); \
        if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
            if (TRACE_GET_OBJECT_FILTER(TASK, pxNewTCB) & CurrentFilterMask) \
                prvTraceStoreEvent2(PSF_EVENT_TASK_CREATE, (uint32_t)pxNewTCB, uxPriority); \
    }
#endif /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0 */

/* Called in xTaskCreate, if it fails (typically if the stack cannot be allocated) */
#undef traceTASK_CREATE_FAILED
#define traceTASK_CREATE_FAILED() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent0(PSF_EVENT_TASK_CREATE_FAILED);

/* Called on vTaskDelete */
#undef traceTASK_DELETE // We don't allow for filtering out "delete" events. They are important and not very frequent. Moreover, we can't exclude create events, so this should be symmetrical.
#define traceTASK_DELETE( pxTaskToDelete ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(TASK, pxTaskToDelete) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_TASK_DELETE, (uint32_t)pxTaskToDelete, (pxTaskToDelete != NULL) ? (pxTaskToDelete->uxPriority) : 0); \
    prvTraceDeleteSymbol(pxTaskToDelete); \
    prvTraceDeleteObjectData(pxTaskToDelete);

#if (TRC_CFG_SCHEDULING_ONLY == 0)

#if (defined(configUSE_TICKLESS_IDLE) && configUSE_TICKLESS_IDLE != 0)

#undef traceLOW_POWER_IDLE_BEGIN
#define traceLOW_POWER_IDLE_BEGIN() \
    { \
        prvTraceStoreEvent1(PSF_EVENT_LOWPOWER_BEGIN, xExpectedIdleTime); \
    }

#undef traceLOW_POWER_IDLE_END
#define traceLOW_POWER_IDLE_END() \
    { \
        prvTraceStoreEvent0(PSF_EVENT_LOWPOWER_END); \
    }

#endif /* (defined(configUSE_TICKLESS_IDLE) && configUSE_TICKLESS_IDLE != 0) */

/* Called on vTaskSuspend */
#undef traceTASK_SUSPEND
#define traceTASK_SUSPEND( pxTaskToSuspend ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(TASK, pxTaskToSuspend) & CurrentFilterMask) \
            prvTraceStoreEvent1(PSF_EVENT_TASK_SUSPEND, (uint32_t)pxTaskToSuspend);

/* Called on vTaskDelay - note the use of the FreeRTOS variable xTicksToDelay */
#undef traceTASK_DELAY
#define traceTASK_DELAY() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent1(PSF_EVENT_TASK_DELAY, xTicksToDelay);

/* Called on vTaskDelayUntil - note the use of the FreeRTOS variable xTimeToWake */
#undef traceTASK_DELAY_UNTIL
#if TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0
#define traceTASK_DELAY_UNTIL(xTimeToWake) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent1(PSF_EVENT_TASK_DELAY_UNTIL, (uint32_t)xTimeToWake);
#else /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0 */
#define traceTASK_DELAY_UNTIL() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent1(PSF_EVENT_TASK_DELAY_UNTIL, (uint32_t)xTimeToWake);
#endif /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0 */

#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0)
#define traceQUEUE_CREATE_HELPER() \
        case queueQUEUE_TYPE_MUTEX: \
            prvTraceStoreEvent1(PSF_EVENT_MUTEX_CREATE, (uint32_t)pxNewQueue); \
            break; \
        case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
            prvTraceStoreEvent1(PSF_EVENT_MUTEX_RECURSIVE_CREATE, (uint32_t)pxNewQueue); \
            break;
#else
#define traceQUEUE_CREATE_HELPER()
#endif /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0) */

/* Called in xQueueCreate, and thereby for all other objects based on queues, such as semaphores. */
#undef traceQUEUE_CREATE
#define traceQUEUE_CREATE( pxNewQueue )\
    TRACE_SET_OBJECT_FILTER(QUEUE, pxNewQueue, CurrentFilterGroup); \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
    { \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxNewQueue) & CurrentFilterMask) \
        { \
            switch (pxNewQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent2(PSF_EVENT_QUEUE_CREATE, (uint32_t)pxNewQueue, uxQueueLength); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                    prvTraceStoreEvent1(PSF_EVENT_SEMAPHORE_BINARY_CREATE, (uint32_t)pxNewQueue); \
                    break; \
                traceQUEUE_CREATE_HELPER() \
            } \
        } \
    }

#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0)
#define traceQUEUE_CREATE_FAILED_HELPER() \
        case queueQUEUE_TYPE_MUTEX: \
            prvTraceStoreEvent1(PSF_EVENT_MUTEX_CREATE_FAILED, 0); \
            break; \
        case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
            prvTraceStoreEvent1(PSF_EVENT_MUTEX_RECURSIVE_CREATE_FAILED, 0); \
            break;
#else
#define traceQUEUE_CREATE_FAILED_HELPER()
#endif /* (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0) */

/* Called in xQueueCreate, if the queue creation fails */
#undef traceQUEUE_CREATE_FAILED
#define traceQUEUE_CREATE_FAILED( queueType ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
    { \
        switch (queueType) \
        { \
            case queueQUEUE_TYPE_BASE: \
                prvTraceStoreEvent2(PSF_EVENT_QUEUE_CREATE_FAILED, 0, uxQueueLength); \
                break; \
            case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                prvTraceStoreEvent1(PSF_EVENT_SEMAPHORE_BINARY_CREATE_FAILED, 0); \
                break; \
            traceQUEUE_CREATE_FAILED_HELPER() \
        } \
    }

#undef traceQUEUE_DELETE // We don't allow for filtering out "delete" events. They are important and not very frequent. Moreover, we can't exclude create events, so this should be symmetrical.
#define traceQUEUE_DELETE( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
    { \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
        { \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent2(PSF_EVENT_QUEUE_DELETE, (uint32_t)pxQueue, (pxQueue != NULL) ? (pxQueue->uxMessagesWaiting) : 0); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent2(PSF_EVENT_MUTEX_DELETE, (uint32_t)pxQueue, (pxQueue != NULL) ? (pxQueue->uxMessagesWaiting) : 0); \
                    break; \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                    prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_DELETE, (uint32_t)pxQueue, (pxQueue != NULL) ? (pxQueue->uxMessagesWaiting) : 0); \
                    break; \
            } \
        } \
    } \
    prvTraceDeleteSymbol(pxQueue);

/* Called in xQueueCreateCountingSemaphore, if the creation succeeds */
#undef traceCREATE_COUNTING_SEMAPHORE
#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X)
#define traceCREATE_COUNTING_SEMAPHORE() \
    TRACE_SET_OBJECT_FILTER(QUEUE, xHandle, CurrentFilterGroup); \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, xHandle) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_COUNTING_CREATE, (uint32_t)xHandle, uxMaxCount)
#elif (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_7_5_OR_7_6)
#define traceCREATE_COUNTING_SEMAPHORE() \
    TRACE_SET_OBJECT_FILTER(QUEUE, xHandle, CurrentFilterGroup); \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, xHandle) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_COUNTING_CREATE, (uint32_t)xHandle, uxInitialCount);
#elif (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_7_4)
#define traceCREATE_COUNTING_SEMAPHORE() \
    TRACE_SET_OBJECT_FILTER(QUEUE, xHandle, CurrentFilterGroup); \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, xHandle) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_COUNTING_CREATE, (uint32_t)xHandle, uxCountValue);
#else
#define traceCREATE_COUNTING_SEMAPHORE() \
    TRACE_SET_OBJECT_FILTER(QUEUE, pxHandle, CurrentFilterGroup); \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxHandle) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_COUNTING_CREATE, (uint32_t)pxHandle, uxCountValue);
#endif /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X */

#undef traceCREATE_COUNTING_SEMAPHORE_FAILED
#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X)
#define traceCREATE_COUNTING_SEMAPHORE_FAILED() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_COUNTING_CREATE_FAILED, 0, uxMaxCount);
#elif (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_7_5_OR_7_6)
#define traceCREATE_COUNTING_SEMAPHORE_FAILED() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_COUNTING_CREATE_FAILED, 0, uxInitialCount);
#elif (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_7_4)
#define traceCREATE_COUNTING_SEMAPHORE_FAILED() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_COUNTING_CREATE_FAILED, 0, uxCountValue);
#else
#define traceCREATE_COUNTING_SEMAPHORE_FAILED() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_COUNTING_CREATE_FAILED, 0, uxCountValue);
#endif /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X */

/* This macro is not necessary as of FreeRTOS v9.0.0 */
#if (TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_0_0)
/* Called in xQueueCreateMutex, and thereby also from xSemaphoreCreateMutex and xSemaphoreCreateRecursiveMutex */
#undef traceCREATE_MUTEX
#define traceCREATE_MUTEX( pxNewQueue ) \
    TRACE_SET_OBJECT_FILTER(QUEUE, pxNewQueue, CurrentFilterGroup); \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
    { \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxNewQueue) & CurrentFilterMask) \
        { \
            switch (pxNewQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_MUTEX: \
                    prvTraceStoreEvent1(PSF_EVENT_MUTEX_CREATE, (uint32_t)pxNewQueue); \
                    break; \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent1(PSF_EVENT_MUTEX_RECURSIVE_CREATE, (uint32_t)pxNewQueue); \
                    break; \
            } \
        } \
    }

/* Called in xQueueCreateMutex when the operation fails (when memory allocation fails) */
#undef traceCREATE_MUTEX_FAILED
#define traceCREATE_MUTEX_FAILED() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent1(PSF_EVENT_MUTEX_CREATE_FAILED, 0);
#endif /* (TRC_CFG_FREERTOS_VERSION < TRC_FREERTOS_VERSION_9_0_0) */

/* Called when a message is sent to a queue */
#undef traceQUEUE_SEND
#define traceQUEUE_SEND( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND : PSF_EVENT_QUEUE_SEND_FRONT, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting + 1); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting + 1); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent1(PSF_EVENT_MUTEX_GIVE, (uint32_t)pxQueue); \
                    break; \
            }

/* Called when a message failed to be sent to a queue (timeout) */
#undef traceQUEUE_SEND_FAILED
#define traceQUEUE_SEND_FAILED( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_FAILED : PSF_EVENT_QUEUE_SEND_FRONT_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent1(PSF_EVENT_MUTEX_GIVE_FAILED, (uint32_t)pxQueue); \
                    break; \
            }

/* Called when the task is blocked due to a send operation on a full queue */
#undef traceBLOCKING_ON_QUEUE_SEND
#define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_BLOCK : PSF_EVENT_QUEUE_SEND_FRONT_BLOCK, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE_BLOCK, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent1(PSF_EVENT_MUTEX_GIVE_BLOCK, (uint32_t)pxQueue); \
                    break; \
            }

/**************************************************************************/
/* Makes sure xQueueGiveFromISR also has an xCopyPosition parameter       */
/**************************************************************************/
/* Helpers needed to correctly expand names */
#define TZ__CAT2(a,b) a ## b
#define TZ__CAT(a,b) TZ__CAT2(a, b)

/* Expands the name if this header is included in queue.c: uxQueueType must be a macro that only exists in queue.c, and it must expand to nothing or to something that is valid in an identifier */
#define xQueueGiveFromISR(a,b) TZ__CAT(xQueueGiveFromISR__, uxQueueType) (a,b)

/* If in queue.c, the "uxQueueType" macro expands to "pcHead". queueSEND_TO_BACK is the value we need to send in */
#define xQueueGiveFromISR__pcHead(__a, __b) MyWrapper(__a, __b, const BaseType_t xCopyPosition); \
BaseType_t xQueueGiveFromISR(__a, __b) { return MyWrapper(xQueue, pxHigherPriorityTaskWoken, queueSEND_TO_BACK); } \
BaseType_t MyWrapper(__a, __b, const BaseType_t xCopyPosition)

/* If not in queue.c, "uxQueueType" isn't expanded */
#define xQueueGiveFromISR__uxQueueType(__a, __b) xQueueGiveFromISR(__a,__b)

/**************************************************************************/
/* End of xQueueGiveFromISR fix                                           */
/**************************************************************************/
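
/* Sketch of how the expansion above behaves (illustrative only, not additional
 * recorder code): inside queue.c FreeRTOS defines "uxQueueType" (as pcHead), so
 * the function header
 *
 *     BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
 *
 * expands via TZ__CAT to xQueueGiveFromISR__pcHead(...), i.e. into a declaration
 * of MyWrapper, a forwarding xQueueGiveFromISR() that calls MyWrapper with
 * queueSEND_TO_BACK, and the MyWrapper header that takes over the original
 * function body. That is what gives the trace macros an xCopyPosition value to
 * read. In any other translation unit "uxQueueType" is not a macro, so the name
 * expands to xQueueGiveFromISR__uxQueueType(...), which maps straight back to the
 * real xQueueGiveFromISR() and leaves callers unaffected. */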

/* Called when a message is sent from interrupt context, e.g., using xQueueSendFromISR */
#undef traceQUEUE_SEND_FROM_ISR
#define traceQUEUE_SEND_FROM_ISR( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
        switch (pxQueue->ucQueueType) \
        { \
            case queueQUEUE_TYPE_BASE: \
                prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_FROMISR : PSF_EVENT_QUEUE_SEND_FRONT_FROMISR, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting + 1); \
                break; \
            case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
            case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE_FROMISR, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting + 1); \
                break; \
        }

/* Called when a message send from interrupt context fails (since the queue was full) */
#undef traceQUEUE_SEND_FROM_ISR_FAILED
#define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
        switch (pxQueue->ucQueueType) \
        { \
            case queueQUEUE_TYPE_BASE: \
                prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_FROMISR_FAILED : PSF_EVENT_QUEUE_SEND_FRONT_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
                break; \
            case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
            case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
                break; \
        }

/* Called when a message is received from a queue */
#undef traceQUEUE_RECEIVE
#define traceQUEUE_RECEIVE( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    if (isQueueReceiveHookActuallyPeek) \
                        prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \
                    else \
                        prvTraceStoreEvent3(PSF_EVENT_QUEUE_RECEIVE, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    if (isQueueReceiveHookActuallyPeek) \
                        prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_PEEK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \
                    else \
                        prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_TAKE, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    if (isQueueReceiveHookActuallyPeek) \
                        prvTraceStoreEvent2(PSF_EVENT_MUTEX_PEEK, (uint32_t)pxQueue, xTicksToWait); \
                    else \
                        prvTraceStoreEvent2(PSF_EVENT_MUTEX_TAKE, (uint32_t)pxQueue, xTicksToWait); \
                    break; \
            }

/* Called when a receive operation on a queue fails (timeout) */
#undef traceQUEUE_RECEIVE_FAILED
#define traceQUEUE_RECEIVE_FAILED( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent3(isQueueReceiveHookActuallyPeek ? PSF_EVENT_QUEUE_PEEK_FAILED : PSF_EVENT_QUEUE_RECEIVE_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    prvTraceStoreEvent3(isQueueReceiveHookActuallyPeek ? PSF_EVENT_SEMAPHORE_PEEK_FAILED : PSF_EVENT_SEMAPHORE_TAKE_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent2(isQueueReceiveHookActuallyPeek ? PSF_EVENT_MUTEX_PEEK_FAILED : PSF_EVENT_MUTEX_TAKE_FAILED, (uint32_t)pxQueue, xTicksToWait); \
                    break; \
            }

/* Called when the task is blocked due to a receive operation on an empty queue */
#undef traceBLOCKING_ON_QUEUE_RECEIVE
#define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent3(isQueueReceiveHookActuallyPeek ? PSF_EVENT_QUEUE_PEEK_BLOCK : PSF_EVENT_QUEUE_RECEIVE_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    prvTraceStoreEvent3(isQueueReceiveHookActuallyPeek ? PSF_EVENT_SEMAPHORE_PEEK_BLOCK : PSF_EVENT_SEMAPHORE_TAKE_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent2(isQueueReceiveHookActuallyPeek ? PSF_EVENT_MUTEX_PEEK_BLOCK : PSF_EVENT_MUTEX_TAKE_BLOCK, (uint32_t)pxQueue, xTicksToWait); \
                    break; \
            }

#if (TRC_CFG_FREERTOS_VERSION > TRC_FREERTOS_VERSION_9_0_1)
/* Called when a peek operation on a queue fails (timeout) */
#undef traceQUEUE_PEEK_FAILED
#define traceQUEUE_PEEK_FAILED( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_PEEK_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent2(PSF_EVENT_MUTEX_PEEK_FAILED, (uint32_t)pxQueue, xTicksToWait); \
                    break; \
            }

/* Called when the task is blocked due to a peek operation on an empty queue */
#undef traceBLOCKING_ON_QUEUE_PEEK
#define traceBLOCKING_ON_QUEUE_PEEK( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_PEEK_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent2(PSF_EVENT_MUTEX_PEEK_BLOCK, (uint32_t)pxQueue, xTicksToWait); \
                    break; \
            }
#endif /* (TRC_CFG_FREERTOS_VERSION > TRC_FREERTOS_VERSION_9_0_1) */
/* Called when a message is received in interrupt context, e.g., using xQueueReceiveFromISR */
#undef traceQUEUE_RECEIVE_FROM_ISR
#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \
	if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
		switch (pxQueue->ucQueueType) \
		{ \
			case queueQUEUE_TYPE_BASE: \
				prvTraceStoreEvent2(PSF_EVENT_QUEUE_RECEIVE_FROMISR, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting - 1); \
				break; \
			case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
			case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
				prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_TAKE_FROMISR, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting - 1); \
				break; \
		}

/* Called when a message receive from interrupt context fails (since the queue was empty) */
#undef traceQUEUE_RECEIVE_FROM_ISR_FAILED
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \
	if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
		switch (pxQueue->ucQueueType) \
		{ \
			case queueQUEUE_TYPE_BASE: \
				prvTraceStoreEvent2(PSF_EVENT_QUEUE_RECEIVE_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
				break; \
			case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
			case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
				prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_TAKE_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
				break; \
		}
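/*
 * Illustrative usage sketch (not part of the recorder): the FromISR variants
 * above check only the queue filter, since no task context applies in an ISR.
 * xQ and ulMsg are hypothetical application symbols.
 *
 *     BaseType_t xWoken = pdFALSE;
 *     if (xQueueReceiveFromISR(xQ, &ulMsg, &xWoken) == pdTRUE)
 *     {
 *         // PSF_EVENT_QUEUE_RECEIVE_FROMISR; an empty queue logs PSF_EVENT_QUEUE_RECEIVE_FROMISR_FAILED.
 *     }
 */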
\r
/* Called on xQueuePeek */
#undef traceQUEUE_PEEK
#define traceQUEUE_PEEK( pxQueue ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
			switch (pxQueue->ucQueueType) \
			{ \
				case queueQUEUE_TYPE_BASE: \
					prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
					break; \
				case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
				case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
					prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_PEEK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
					break; \
				case queueQUEUE_TYPE_MUTEX: \
				case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
					prvTraceStoreEvent1(PSF_EVENT_MUTEX_PEEK, (uint32_t)pxQueue); \
					break; \
			}
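/*
 * Illustrative usage sketch (not part of the recorder): xQueuePeek() reads a
 * message without removing it, so uxMessagesWaiting is unchanged and the peek
 * events above are logged instead of the receive events. xQ is a hypothetical
 * queue handle.
 *
 *     uint32_t ulHead;
 *     if (xQueuePeek(xQ, &ulHead, 0) == pdTRUE)
 *     {
 *         // Trace shows PSF_EVENT_QUEUE_PEEK with the current message count.
 *     }
 */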
\r
/* Called in vTaskPrioritySet */
#undef traceTASK_PRIORITY_SET
#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \
	prvTraceSaveObjectData(pxTask, uxNewPriority); \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(TASK, pxTask) & CurrentFilterMask) \
			prvTraceStoreEvent2(PSF_EVENT_TASK_PRIORITY, (uint32_t)pxTask, uxNewPriority);

/* Called in vTaskPriorityInherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_INHERIT
#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(TASK, pxTask) & CurrentFilterMask) \
			prvTraceStoreEvent2(PSF_EVENT_TASK_PRIO_INHERIT, (uint32_t)pxTask, uxNewPriority);

/* Called in vTaskPriorityDisinherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_DISINHERIT
#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(TASK, pxTask) & CurrentFilterMask) \
			prvTraceStoreEvent2(PSF_EVENT_TASK_PRIO_DISINHERIT, (uint32_t)pxTask, uxNewPriority);

/* Called in vTaskResume */
#undef traceTASK_RESUME
#define traceTASK_RESUME( pxTaskToResume ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(TASK, pxTaskToResume) & CurrentFilterMask) \
			prvTraceStoreEvent1(PSF_EVENT_TASK_RESUME, (uint32_t)pxTaskToResume);

/* Called in vTaskResumeFromISR */
#undef traceTASK_RESUME_FROM_ISR
#define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, pxTaskToResume) & CurrentFilterMask) \
		prvTraceStoreEvent1(PSF_EVENT_TASK_RESUME_FROMISR, (uint32_t)pxTaskToResume);
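/*
 * Illustrative usage sketch (not part of the recorder): priority changes and
 * resume calls are traced per task. xWorkerTask is a hypothetical task handle.
 *
 *     vTaskPrioritySet(xWorkerTask, tskIDLE_PRIORITY + 3);  // PSF_EVENT_TASK_PRIORITY
 *     vTaskResume(xWorkerTask);                             // PSF_EVENT_TASK_RESUME
 */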
\r
#if (TRC_CFG_INCLUDE_MEMMANG_EVENTS == 1)

#undef traceMALLOC
#define traceMALLOC( pvAddress, uiSize ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		prvTraceStoreEvent2(PSF_EVENT_MALLOC, (uint32_t)pvAddress, uiSize);

#undef traceFREE
#define traceFREE( pvAddress, uiSize ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		prvTraceStoreEvent2(PSF_EVENT_FREE, (uint32_t)pvAddress, (uint32_t)(0 - uiSize)); /* "0 -" instead of just "-" to get rid of a warning... */

#endif /* (TRC_CFG_INCLUDE_MEMMANG_EVENTS == 1) */
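/*
 * Illustrative usage sketch (not part of the recorder): with
 * TRC_CFG_INCLUDE_MEMMANG_EVENTS enabled, heap usage through the standard
 * FreeRTOS allocator is traced, e.g.
 *
 *     void *pvBuf = pvPortMalloc(128);   // PSF_EVENT_MALLOC with the allocation size
 *     vPortFree(pvBuf);                  // PSF_EVENT_FREE with the freed size negated (0 - uiSize)
 */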
\r
#if (TRC_CFG_INCLUDE_TIMER_EVENTS == 1)

/* Called in timer.c - xTimerCreate */
#undef traceTIMER_CREATE
#define traceTIMER_CREATE(tmr) \
	TRACE_SET_OBJECT_FILTER(TIMER, tmr, CurrentFilterGroup); \
	prvTraceSaveSymbol(tmr, tmr->pcTimerName); \
	prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, tmr->pcTimerName, tmr); \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(TIMER, tmr) & CurrentFilterMask) \
			prvTraceStoreEvent2(PSF_EVENT_TIMER_CREATE, (uint32_t)tmr, tmr->xTimerPeriodInTicks);

#undef traceTIMER_CREATE_FAILED
#define traceTIMER_CREATE_FAILED() \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		prvTraceStoreEvent0(PSF_EVENT_TIMER_CREATE_FAILED);

#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X)
#define traceTIMER_COMMAND_SEND_8_0_CASES(tmr) \
	case tmrCOMMAND_RESET: \
		prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_RESET : PSF_EVENT_TIMER_RESET_FAILED, (uint32_t)tmr, xOptionalValue); \
		break; \
	case tmrCOMMAND_START_FROM_ISR: \
		prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_START_FROMISR : PSF_EVENT_TIMER_START_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
		break; \
	case tmrCOMMAND_RESET_FROM_ISR: \
		prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_RESET_FROMISR : PSF_EVENT_TIMER_RESET_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
		break; \
	case tmrCOMMAND_STOP_FROM_ISR: \
		prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_STOP_FROMISR : PSF_EVENT_TIMER_STOP_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
		break; \
	case tmrCOMMAND_CHANGE_PERIOD_FROM_ISR: \
		prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR : PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
		break;
#else /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X */
#define traceTIMER_COMMAND_SEND_8_0_CASES(tmr)
#endif /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_8_X */

/* Note that xCommandID can never be tmrCOMMAND_EXECUTE_CALLBACK (-1) since the trace macro is not called in that case */
#undef traceTIMER_COMMAND_SEND
#define traceTIMER_COMMAND_SEND(tmr, xCommandID, xOptionalValue, xReturn) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(TIMER, tmr) & CurrentFilterMask) \
			switch(xCommandID) \
			{ \
				case tmrCOMMAND_START: \
					prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_START : PSF_EVENT_TIMER_START_FAILED, (uint32_t)tmr); \
					break; \
				case tmrCOMMAND_STOP: \
					prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_STOP : PSF_EVENT_TIMER_STOP_FAILED, (uint32_t)tmr); \
					break; \
				case tmrCOMMAND_CHANGE_PERIOD: \
					prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_CHANGEPERIOD : PSF_EVENT_TIMER_CHANGEPERIOD_FAILED, (uint32_t)tmr, xOptionalValue); \
					break; \
				case tmrCOMMAND_DELETE: \
					prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_DELETE : PSF_EVENT_TIMER_DELETE_FAILED, (uint32_t)tmr); \
					break; \
				traceTIMER_COMMAND_SEND_8_0_CASES(tmr) \
			}

#undef traceTIMER_EXPIRED
#define traceTIMER_EXPIRED(tmr) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(TIMER, tmr) & CurrentFilterMask) \
			prvTraceStoreEvent2(PSF_EVENT_TIMER_EXPIRED, (uint32_t)tmr->pxCallbackFunction, (uint32_t)tmr->pvTimerID);

#endif /* #if (TRC_CFG_INCLUDE_TIMER_EVENTS == 1) */
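/*
 * Illustrative usage sketch (not part of the recorder): timer commands are
 * traced when queued to the timer service task. prvTimerCallback is a
 * hypothetical application callback.
 *
 *     TimerHandle_t xTmr = xTimerCreate("Tmr", pdMS_TO_TICKS(500), pdTRUE, NULL, prvTimerCallback);  // PSF_EVENT_TIMER_CREATE
 *     xTimerStart(xTmr, 0);  // PSF_EVENT_TIMER_START, or PSF_EVENT_TIMER_START_FAILED if the command queue is full
 */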
\r
#if (TRC_CFG_INCLUDE_PEND_FUNC_CALL_EVENTS == 1)

#undef tracePEND_FUNC_CALL
#define tracePEND_FUNC_CALL(func, arg1, arg2, ret) \
	prvTraceStoreEvent1((ret == pdPASS) ? PSF_EVENT_TIMER_PENDFUNCCALL : PSF_EVENT_TIMER_PENDFUNCCALL_FAILED, (uint32_t)func);

#undef tracePEND_FUNC_CALL_FROM_ISR
#define tracePEND_FUNC_CALL_FROM_ISR(func, arg1, arg2, ret) \
	prvTraceStoreEvent1((ret == pdPASS) ? PSF_EVENT_TIMER_PENDFUNCCALL_FROMISR : PSF_EVENT_TIMER_PENDFUNCCALL_FROMISR_FAILED, (uint32_t)func);

#endif /* (TRC_CFG_INCLUDE_PEND_FUNC_CALL_EVENTS == 1) */
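/*
 * Illustrative usage sketch (not part of the recorder): deferring work from an
 * ISR to the timer service task is traced by the macros above. prvDeferredWork
 * and xWoken are hypothetical application symbols.
 *
 *     xTimerPendFunctionCallFromISR(prvDeferredWork, NULL, 0, &xWoken);  // PSF_EVENT_TIMER_PENDFUNCCALL_FROMISR
 */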
\r
#if (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1)

#undef traceEVENT_GROUP_CREATE
#define traceEVENT_GROUP_CREATE(eg) \
	TRACE_SET_OBJECT_FILTER(EVENTGROUP, eg, CurrentFilterGroup); \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
			prvTraceStoreEvent1(PSF_EVENT_EVENTGROUP_CREATE, (uint32_t)eg);

#undef traceEVENT_GROUP_DELETE
#define traceEVENT_GROUP_DELETE(eg) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
			prvTraceStoreEvent1(PSF_EVENT_EVENTGROUP_DELETE, (uint32_t)eg); \
	prvTraceDeleteSymbol(eg);

#undef traceEVENT_GROUP_CREATE_FAILED
#define traceEVENT_GROUP_CREATE_FAILED() \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		prvTraceStoreEvent0(PSF_EVENT_EVENTGROUP_CREATE_FAILED);

#undef traceEVENT_GROUP_SYNC_BLOCK
#define traceEVENT_GROUP_SYNC_BLOCK(eg, bitsToSet, bitsToWaitFor) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
			prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SYNC_BLOCK, (uint32_t)eg, bitsToWaitFor);

#undef traceEVENT_GROUP_SYNC_END
#define traceEVENT_GROUP_SYNC_END(eg, bitsToSet, bitsToWaitFor, wasTimeout) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
			prvTraceStoreEvent2((wasTimeout != pdTRUE) ? PSF_EVENT_EVENTGROUP_SYNC : PSF_EVENT_EVENTGROUP_SYNC_FAILED, (uint32_t)eg, bitsToWaitFor);

#undef traceEVENT_GROUP_WAIT_BITS_BLOCK
#define traceEVENT_GROUP_WAIT_BITS_BLOCK(eg, bitsToWaitFor) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
			prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_WAITBITS_BLOCK, (uint32_t)eg, bitsToWaitFor);

#undef traceEVENT_GROUP_WAIT_BITS_END
#define traceEVENT_GROUP_WAIT_BITS_END(eg, bitsToWaitFor, wasTimeout) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
			prvTraceStoreEvent2((wasTimeout != pdTRUE) ? PSF_EVENT_EVENTGROUP_WAITBITS : PSF_EVENT_EVENTGROUP_WAITBITS_FAILED, (uint32_t)eg, bitsToWaitFor);

#undef traceEVENT_GROUP_CLEAR_BITS
#define traceEVENT_GROUP_CLEAR_BITS(eg, bitsToClear) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
			prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_CLEARBITS, (uint32_t)eg, bitsToClear);

#undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(eg, bitsToClear) \
	if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
		prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_CLEARBITS_FROMISR, (uint32_t)eg, bitsToClear);

#undef traceEVENT_GROUP_SET_BITS
#define traceEVENT_GROUP_SET_BITS(eg, bitsToSet) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
			prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SETBITS, (uint32_t)eg, bitsToSet);

#undef traceEVENT_GROUP_SET_BITS_FROM_ISR
#define traceEVENT_GROUP_SET_BITS_FROM_ISR(eg, bitsToSet) \
	if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
		prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SETBITS_FROMISR, (uint32_t)eg, bitsToSet);

#endif /* (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1) */
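/*
 * Illustrative usage sketch (not part of the recorder): event group operations
 * are traced together with the bits involved, e.g.
 *
 *     EventGroupHandle_t xEG = xEventGroupCreate();        // PSF_EVENT_EVENTGROUP_CREATE
 *     xEventGroupSetBits(xEG, 0x01);                       // PSF_EVENT_EVENTGROUP_SETBITS
 *     xEventGroupWaitBits(xEG, 0x01, pdTRUE, pdFALSE, 0);  // PSF_EVENT_EVENTGROUP_WAITBITS
 */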
\r
#undef traceTASK_NOTIFY_TAKE
#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0)
#define traceTASK_NOTIFY_TAKE() \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask){ \
		if (pxCurrentTCB->ucNotifyState == taskNOTIFICATION_RECEIVED) \
			prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE, (uint32_t)pxCurrentTCB, xTicksToWait); \
		else \
			prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE_FAILED, (uint32_t)pxCurrentTCB, xTicksToWait);}
#else /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0 */
#define traceTASK_NOTIFY_TAKE() \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask){ \
		if (pxCurrentTCB->eNotifyState == eNotified) \
			prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE, (uint32_t)pxCurrentTCB, xTicksToWait); \
		else \
			prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE_FAILED, (uint32_t)pxCurrentTCB, xTicksToWait);}
#endif /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0 */

#undef traceTASK_NOTIFY_TAKE_BLOCK
#define traceTASK_NOTIFY_TAKE_BLOCK() \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE_BLOCK, (uint32_t)pxCurrentTCB, xTicksToWait);

#undef traceTASK_NOTIFY_WAIT
#if (TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0)
#define traceTASK_NOTIFY_WAIT() \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask){ \
		if (pxCurrentTCB->ucNotifyState == taskNOTIFICATION_RECEIVED) \
			prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT, (uint32_t)pxCurrentTCB, xTicksToWait); \
		else \
			prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT_FAILED, (uint32_t)pxCurrentTCB, xTicksToWait);}
#else /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0 */
#define traceTASK_NOTIFY_WAIT() \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask){ \
		if (pxCurrentTCB->eNotifyState == eNotified) \
			prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT, (uint32_t)pxCurrentTCB, xTicksToWait); \
		else \
			prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT_FAILED, (uint32_t)pxCurrentTCB, xTicksToWait);}
#endif /* TRC_CFG_FREERTOS_VERSION >= TRC_FREERTOS_VERSION_9_0_0 */

#undef traceTASK_NOTIFY_WAIT_BLOCK
#define traceTASK_NOTIFY_WAIT_BLOCK() \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT_BLOCK, (uint32_t)pxCurrentTCB, xTicksToWait);

#undef traceTASK_NOTIFY
#define traceTASK_NOTIFY() \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \
			prvTraceStoreEvent1(PSF_EVENT_TASK_NOTIFY, (uint32_t)xTaskToNotify);

#undef traceTASK_NOTIFY_FROM_ISR
#define traceTASK_NOTIFY_FROM_ISR() \
	if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \
		prvTraceStoreEvent1(PSF_EVENT_TASK_NOTIFY_FROM_ISR, (uint32_t)xTaskToNotify);

#undef traceTASK_NOTIFY_GIVE_FROM_ISR
#define traceTASK_NOTIFY_GIVE_FROM_ISR() \
	if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \
		prvTraceStoreEvent1(PSF_EVENT_TASK_NOTIFY_GIVE_FROM_ISR, (uint32_t)xTaskToNotify);
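/*
 * Illustrative usage sketch (not part of the recorder): direct-to-task
 * notifications used as a lightweight semaphore are traced by the macros
 * above. xWorkerTask and xWoken are hypothetical application symbols.
 *
 *     // In the ISR:
 *     vTaskNotifyGiveFromISR(xWorkerTask, &xWoken);  // PSF_EVENT_TASK_NOTIFY_GIVE_FROM_ISR
 *     // In the task:
 *     ulTaskNotifyTake(pdTRUE, portMAX_DELAY);       // PSF_EVENT_TASK_NOTIFY_TAKE (after a possible *_TAKE_BLOCK)
 */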
\r
#undef traceQUEUE_REGISTRY_ADD
#define traceQUEUE_REGISTRY_ADD(object, name) \
	prvTraceSaveSymbol(object, (const char*)name); \
	prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, name, object);
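/*
 * Illustrative usage sketch (not part of the recorder): adding a queue to the
 * FreeRTOS queue registry also names it in the trace via PSF_EVENT_OBJ_NAME.
 * xQ is a hypothetical queue handle; vTraceSetQueueName() can be used instead
 * when the registry is not enabled.
 *
 *     vQueueAddToRegistry(xQ, "SensorQueue");
 */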
\r
#if (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1)

#undef traceSTREAM_BUFFER_CREATE
#define traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer ) \
	TRACE_SET_OBJECT_FILTER(STREAMBUFFER, pxStreamBuffer, CurrentFilterGroup); \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, pxStreamBuffer) & CurrentFilterMask) \
			prvTraceStoreEvent2(xIsMessageBuffer == 1 ? PSF_EVENT_MESSAGEBUFFER_CREATE : PSF_EVENT_STREAMBUFFER_CREATE, (uint32_t)pxStreamBuffer, xBufferSizeBytes);

#undef traceSTREAM_BUFFER_CREATE_FAILED
#define traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		prvTraceStoreEvent2(xIsMessageBuffer == 1 ? PSF_EVENT_MESSAGEBUFFER_CREATE_FAILED : PSF_EVENT_STREAMBUFFER_CREATE_FAILED, 0, xBufferSizeBytes);

#undef traceSTREAM_BUFFER_CREATE_STATIC_FAILED
#define traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer ) \
	traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer )

#undef traceSTREAM_BUFFER_DELETE
#define traceSTREAM_BUFFER_DELETE( xStreamBuffer ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, pxStreamBuffer) & CurrentFilterMask) \
			prvTraceStoreEvent2(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_DELETE : PSF_EVENT_STREAMBUFFER_DELETE, (uint32_t)xStreamBuffer, prvBytesInBuffer(xStreamBuffer)); \
	prvTraceDeleteSymbol(xStreamBuffer);

#undef traceSTREAM_BUFFER_RESET
#define traceSTREAM_BUFFER_RESET( xStreamBuffer ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
			prvTraceStoreEvent2(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_RESET : PSF_EVENT_STREAMBUFFER_RESET, (uint32_t)xStreamBuffer, 0);

#undef traceSTREAM_BUFFER_SEND
#define traceSTREAM_BUFFER_SEND( xStreamBuffer, xReturn ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
			prvTraceStoreEvent2(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_SEND : PSF_EVENT_STREAMBUFFER_SEND, (uint32_t)xStreamBuffer, prvBytesInBuffer(xStreamBuffer));

#undef traceBLOCKING_ON_STREAM_BUFFER_SEND
#define traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
			prvTraceStoreEvent1(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_SEND_BLOCK : PSF_EVENT_STREAMBUFFER_SEND_BLOCK, (uint32_t)xStreamBuffer);

#undef traceSTREAM_BUFFER_SEND_FAILED
#define traceSTREAM_BUFFER_SEND_FAILED( xStreamBuffer ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
			prvTraceStoreEvent1(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_SEND_FAILED : PSF_EVENT_STREAMBUFFER_SEND_FAILED, (uint32_t)xStreamBuffer);

#undef traceSTREAM_BUFFER_RECEIVE
#define traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
			prvTraceStoreEvent2(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_RECEIVE : PSF_EVENT_STREAMBUFFER_RECEIVE, (uint32_t)xStreamBuffer, prvBytesInBuffer(xStreamBuffer));

#undef traceBLOCKING_ON_STREAM_BUFFER_RECEIVE
#define traceBLOCKING_ON_STREAM_BUFFER_RECEIVE( xStreamBuffer ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
			prvTraceStoreEvent1(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_RECEIVE_BLOCK : PSF_EVENT_STREAMBUFFER_RECEIVE_BLOCK, (uint32_t)xStreamBuffer);

#undef traceSTREAM_BUFFER_RECEIVE_FAILED
#define traceSTREAM_BUFFER_RECEIVE_FAILED( xStreamBuffer ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
			prvTraceStoreEvent1(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_RECEIVE_FAILED : PSF_EVENT_STREAMBUFFER_RECEIVE_FAILED, (uint32_t)xStreamBuffer);

#undef traceSTREAM_BUFFER_SEND_FROM_ISR
#define traceSTREAM_BUFFER_SEND_FROM_ISR( xStreamBuffer, xReturn ) \
	if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
	{ \
		if ( xReturn > ( size_t ) 0 ) \
		{ \
			prvTraceStoreEvent2(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_SEND_FROM_ISR : PSF_EVENT_STREAMBUFFER_SEND_FROM_ISR, (uint32_t)xStreamBuffer, prvBytesInBuffer(xStreamBuffer)); \
		} \
		else \
		{ \
			prvTraceStoreEvent1(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_SEND_FROM_ISR_FAILED : PSF_EVENT_STREAMBUFFER_SEND_FROM_ISR_FAILED, (uint32_t)xStreamBuffer); \
		} \
	}

#undef traceSTREAM_BUFFER_RECEIVE_FROM_ISR
#define traceSTREAM_BUFFER_RECEIVE_FROM_ISR( xStreamBuffer, xReceivedLength ) \
	if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
	{ \
		if ( xReceivedLength > ( size_t ) 0 ) \
		{ \
			prvTraceStoreEvent2(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_RECEIVE_FROM_ISR : PSF_EVENT_STREAMBUFFER_RECEIVE_FROM_ISR, (uint32_t)xStreamBuffer, prvBytesInBuffer(xStreamBuffer)); \
		} \
		else \
		{ \
			prvTraceStoreEvent1(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_RECEIVE_FROM_ISR_FAILED : PSF_EVENT_STREAMBUFFER_RECEIVE_FROM_ISR_FAILED, (uint32_t)xStreamBuffer); \
		} \
	}
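/*
 * Illustrative usage sketch (not part of the recorder): stream and message
 * buffer operations are traced with the buffer's current byte count. pucData
 * is a hypothetical application buffer.
 *
 *     StreamBufferHandle_t xSB = xStreamBufferCreate(256, 1);   // PSF_EVENT_STREAMBUFFER_CREATE
 *     xStreamBufferSend(xSB, pucData, 32, pdMS_TO_TICKS(10));   // PSF_EVENT_STREAMBUFFER_SEND
 */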
\r
#endif /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1) */

#endif /* (TRC_CFG_SCHEDULING_ONLY == 0) */

#endif /* (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING) */

#else /* (TRC_USE_TRACEALYZER_RECORDER == 1) */

/* When the recorder is disabled, the following macros expand to nothing. */
#define vTraceSetQueueName(object, name)
#define vTraceSetSemaphoreName(object, name)
#define vTraceSetMutexName(object, name)
#define vTraceSetEventGroupName(object, name)
#define vTraceSetStreamBufferName(object, name)
#define vTraceSetMessageBufferName(object, name)
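/*
 * Illustrative note (not part of the recorder): since the macros above expand
 * to nothing when the recorder is disabled, naming calls may stay in the
 * application unconditionally, e.g. for a hypothetical queue handle xQ:
 *
 *     vTraceSetQueueName(xQ, "SensorQueue");  // compiles away when configUSE_TRACE_FACILITY is 0
 */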
\r
#endif /* (TRC_USE_TRACEALYZER_RECORDER == 1) */

#ifdef __cplusplus
}
#endif

#endif /* TRC_KERNEL_PORT_H */