// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// How It Works
// ************
// 1. Components Overview
// ======================
// Xtensa has a useful feature: the TRAX debug module. It allows recording program execution flow at run-time without disturbing the CPU's instruction flow.
// Execution flow data are written to a configurable Trace RAM block. Besides accessing Trace RAM itself, the TRAX module also allows reading/writing
// trace memory via its registers by means of JTAG, APB or ERI transactions.
// ESP32 has two Xtensa cores with separate TRAX modules on them and provides two special memory regions to be used as trace memory.
// ESP32 allows muxing access to the trace memory blocks in such a way that while one block is accessed by the CPUs, the other can be accessed by the host
// via JTAG by reading/writing TRAX registers. Block muxing is configurable at run-time and allows switching trace memory blocks between
// accessors in round-robin fashion, so they can read/write separate memory blocks without disturbing each other.
// This module implements an application tracing feature based on the above mechanisms. The feature allows transferring arbitrary user data to the
// host via JTAG with minimal impact on system performance. This module is intended to be used in the following tracing scheme.
//                                                    ------>------                                   ----- (host components) -----
//                                                    |           |                                   |                           |
// ---------------   -----------------------     -----------------------     ----------------    ------     ---------   -----------------
// |apptrace user|-->|target tracing module|<--->|TRAX_MEM0 | TRAX_MEM1|---->|TRAX_DATA_REGS|<-->|JTAG|<--->|OpenOCD|-->|trace data file|
// ---------------   -----------------------     -----------------------     ----------------    ------     ---------   -----------------
//                              |                     |           |                                 |
//                              |                     ------<------          ----------------      |
//                              |<------------------------------------------|TRAX_CTRL_REGS|<---->|
//                                                                           ----------------
// In general, tracing happens in the following way. The user application requests the tracing module to send some data by calling esp_apptrace_buffer_get();
// the module allocates the necessary buffer in the current input trace block. Then the user fills the received buffer with data and calls esp_apptrace_buffer_put().
// When the current input trace block is filled with app data it is exposed to the host, the second block becomes the input one, and buffer filling restarts.
// While the target application fills one memory block, the host reads the other block via JTAG.
// This implementation uses several TRAX registers to control buffer switching and for other communication purposes. This is safe because HW TRAX tracing
// can not be used along with the application tracing feature, so these registers are freely readable/writable via JTAG from the host and via ERI from the ESP32 cores.
// Thus the target CPU overhead of this implementation comes only from the calls that allocate/manage buffers and copy data.
// On the host a special OpenOCD command must be used to read trace data.
// 2. TRAX Registers Layout
// ========================
// This module uses two TRAX HW registers to communicate with the host SW (OpenOCD).
// - Control register uses TRAX_DELAYCNT as storage. Only the lower 24 bits of TRAX_DELAYCNT are writable. The control register has the following bitfields:
//   | 31..24 (unused) | 23 (host_connect) | 22..15 (block_id) | 14..0 (block_len) |
//   14..0 bits - actual length of user data in the trace memory block. The target updates it every time it fills a memory block and exposes it to the host.
//                The host writes zero to this field when it finishes reading the exposed block;
//   22..15 bits - trace memory block transfer ID. Block counter. It can overflow. Updated by the target; the host should not modify it. Actually 1-2 bits would suffice;
//   23 bit - 'host connected' flag. If zero, the host is not connected and the tracing module works in post-mortem mode; otherwise it works in streaming mode.
// - Status register uses TRAX_TRIGGERPC as storage. If this register is non-zero then the CPU is currently changing TRAX registers, and
//   the register holds the address of the instruction which the application will execute when it finishes with those register modifications.
//   See the 'Targets Connection' section for details.
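//
// To make the layout concrete, here is a minimal sketch of how a reader would decode a raw
// control register value (the masks/shifts mirror the ESP_APPTRACE_TRAX_* macros defined
// below; 'ctrl' is assumed to be the 32-bit value read from TRAX_DELAYCNT):
//
//     uint32_t block_len = ctrl & 0x7FFF;         // bits 14..0
//     uint32_t block_id  = (ctrl >> 15) & 0xFF;   // bits 22..15
//     int      connected = (ctrl >> 23) & 0x1;    // bit 23
//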
// 3. Modes of Operation
// =====================
// This module supports two modes of operation:
// - Post-mortem mode. This is the default mode. In this mode the application tracing module does not check whether the host has read all the data from the
//   exposed block and switches blocks in any case. The mode needs no host interaction and so is useful when only the latest
//   trace data are necessary, e.g. for analyzing crashes. On panic the latest data from the current input block are exposed to the host and the host can read them.
//   The menuconfig option CONFIG_ESP32_APPTRACE_ONPANIC_HOST_FLUSH_TRAX_THRESH controls the threshold for flushing data on panic.
// - Streaming mode. The tracing module enters this mode when the host connects to the target and sets the respective bit in the control register. In this mode the tracing
//   module waits for a specified time until the host reads all the data from the exposed block.
//   On panic the tracing module waits (the timeout is configured via the menuconfig option ESP32_APPTRACE_ONPANIC_HOST_FLUSH_TMO) for the host to read all data
//   from the previously exposed block.
// 4. Communication Protocol
// =========================
// 4.1 Trace Memory Blocks
// ^^^^^^^^^^^^^^^^^^^^^^^
// Communication is controlled via the special control register. The host periodically polls the control register on each core to find out if there are any data available.
// When the current input trace memory block is filled, the tracing module exposes the block to the host and updates the block_len and block_id fields in the control register.
// The host reads the new register value and, according to it, starts reading data from the exposed block. Meanwhile the target starts filling the other trace block.
// When the host finishes reading the block it clears the block_len field in the control register, indicating to the target that it is ready to accept the next block.
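//
// For illustration only (the real logic lives in OpenOCD), the host-side polling loop implied
// by this handshake looks roughly like the following; read_trax_reg()/write_trax_reg() and
// read_trace_mem() are hypothetical host-side JTAG helpers:
//
//     for (;;) {
//         uint32_t ctrl = read_trax_reg(TRAX_DELAYCNT);
//         uint32_t len  = ctrl & 0x7FFF;                     // block_len field
//         if (len != 0) {                                    // target exposed a filled block
//             read_trace_mem(buf, len);                      // drain the exposed block
//             write_trax_reg(TRAX_DELAYCNT, ctrl & ~0x7FFF); // clear block_len: block accepted
//         }
//     }
//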
// 4.2 User Data Chunks Level
// --------------------------
// Since a trace memory block is shared between user data chunks, and data copying is performed on behalf of the API user (in its normal context) in a
// multithreading environment, the task/ISR which copies data can be preempted by another higher prio task/ISR. So it is possible
// that a task/ISR fails to complete filling its data chunk before the whole trace block is exposed to the host. To handle such conditions the tracing
// module prepends every user data chunk with a 4-byte header which contains the allocated buffer size and the actual data length within it. The OpenOCD command
// which reads application traces reports an error when it reads an incomplete user data chunk.
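//
// For example, given the 4-byte header (see esp_tracedata_hdr_t below), the host-side check
// for an incomplete chunk reduces to a sketch like:
//
//     if (hdr->wr_sz < hdr->block_sz) {
//         // the writer was preempted and never completed this chunk: report an error
//     }
//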
// 4.3 Targets Connection/Disconnection
// ------------------------------------
// When the host is going to start tracing in streaming mode it needs to put both ESP32 cores into an initial state where the 'host connected' bit is set
// on both cores. To accomplish this the host halts both cores and sets this bit in the TRAX registers. But the target code can be halted in a state when it has read the
// control register but has not updated its value yet. To handle such situations the target code indicates to the host that it is updating the control register by writing a
// non-zero value to the status register. Actually it writes the address of the instruction which it will execute when it finishes with
// the register update. When the target is halted during a control register update the host sets a breakpoint at the address from the status register and resumes the CPU.
// After the target code finishes the register update it halts on the breakpoint; the host detects this and safely sets the 'host connected' bit. When both cores
// are set up they are resumed. Tracing starts without further intrusion into the CPUs' work.
// When the host is going to stop tracing in streaming mode it needs to disconnect the targets. Disconnection is done using the same algorithm
// as connection, but the 'host connected' bits are cleared on the ESP32 cores.
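//
// Schematically, the connect sequence described above is, per core (pseudocode only, the
// actual implementation is on the OpenOCD side; helper names are illustrative):
//
//     halt(core);
//     uint32_t stat = read_trax_reg(TRAX_TRIGGERPC);
//     if (stat != 0) {                  // core was halted mid-update of the control register
//         set_breakpoint(stat);
//         resume(core);                 // let the register update finish
//         wait_halted(core);            // core stops right after the update
//         remove_breakpoint(stat);
//     }
//     write_trax_reg(TRAX_DELAYCNT, read_trax_reg(TRAX_DELAYCNT) | (1 << 23)); // 'host connected'
//     resume(core);                     // repeat for the other core, then trace
//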
// 5. Module Access Synchronization
// ================================
// Access to the module's internal data is synchronized with a custom mutex. The mutex is a wrapper for portMUX_TYPE and uses almost the same sync mechanism as
// vPortCPUAcquireMutex/vPortCPUReleaseMutex. The mechanism uses the S32C1I Xtensa instruction to implement exclusive access to the module's data from tasks and
// ISRs running on both cores. The custom mutex also allows specifying a timeout for the locking operation. The locking routine checks the underlying mutex in a loop until
// it gets ownership or the timeout expires. The differences of the application tracing module's mutex from vPortCPUAcquireMutex/vPortCPUReleaseMutex are:
// - Support for timeouts.
// - Local IRQs for the CPU which owns the mutex are disabled until the call to the unlocking routine. This is done to avoid priority inversion:
//   if a low prio task takes the mutex with local IRQs enabled, it can be preempted by a high prio task which, in its turn, can try to acquire the mutex with an infinite timeout.
//   So no local task switch occurs while the mutex is locked. But this does not apply to tasks on the other CPU.
//   WARNING: Priority inversion can still happen when a low prio task works on one CPU while medium and high prio tasks work on the other.
// The mutex behaves differently in task and ISR context when the timeout is non-zero:
// - In task context, when the mutex can not be locked, portYIELD() is called before the timeout check to allow other tasks on the same CPU to work.
// - In ISR context, when the mutex can not be locked, nothing is done before the timeout check.
// WARNING: Care must be taken when selecting timeout values for trace calls from ISRs. The tracing module does not feed watchdogs while waiting on internal locks
// or waiting for the host to complete reading the previous block, so if a wait timeout exceeds the watchdog's timeout it can lead to a system reboot.
// 6. Timeouts
// -----------
// The timeout mechanism is based on the xthal_get_ccount() routine and supports timeout values in microseconds.
// There are two situations when a task/ISR can be delayed by a tracing API call. The timeout mechanism takes both conditions into account:
// - Trace data are locked by another task/ISR: waiting on the trace data lock.
// - The current TRAX memory input block is full while working in streaming mode (host is connected): waiting for the host to complete reading the previous block.
// While waiting on any of the above conditions, xthal_get_ccount() is called periodically to calculate the time elapsed since trace API routine entry. When the elapsed
// time exceeds the specified timeout value the operation is canceled and ESP_ERR_TIMEOUT is returned.
// ALSO SEE example usage of the application tracing module in 'components/log/README.rst'
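//
// A minimal usage sketch from application code (these are this module's public APIs declared
// in esp_app_trace.h; error handling trimmed, the 100 us timeouts are arbitrary):
//
//     // one-shot path: the module allocates a buffer and copies the data for us
//     esp_apptrace_write(ESP_APPTRACE_DEST_TRAX, data, size, 100/*us*/);
//
//     // fill-in-place path: get a buffer, fill it, then hand it back
//     uint8_t *buf = esp_apptrace_buffer_get(ESP_APPTRACE_DEST_TRAX, size, 100/*us*/);
//     if (buf != NULL) {
//         memcpy(buf, data, size);
//         esp_apptrace_buffer_put(ESP_APPTRACE_DEST_TRAX, buf, 100/*us*/);
//     }
//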
#include <string.h>
#include <limits.h> // ULONG_MAX, used in esp_apptrace_lock()
#include "soc/soc.h"
#include "soc/dport_reg.h"
#include "eri.h"
#include "trax.h"
#include "freertos/FreeRTOS.h"
#include "freertos/portmacro.h"
#include "freertos/semphr.h"
#include "freertos/task.h"
#include "soc/timer_group_struct.h"
#include "soc/timer_group_reg.h"
#include "esp_app_trace.h"

#if CONFIG_ESP32_APPTRACE_ENABLE

#define ESP_APPTRACE_DEBUG_STATS_ENABLE 0
#define ESP_APPTRACE_BUF_HISTORY_DEPTH  (16*100)
#define ESP_APPTRACE_MAX_VPRINTF_ARGS   256

#define ESP_APPTRACE_PRINT_LOCK_NONE    0
#define ESP_APPTRACE_PRINT_LOCK_SEM     1
#define ESP_APPTRACE_PRINT_LOCK_MUX     2
#define ESP_APPTRACE_PRINT_LOCK         ESP_APPTRACE_PRINT_LOCK_NONE //ESP_APPTRACE_PRINT_LOCK_SEM
#define ESP_APPTRACE_USE_LOCK_SEM       0 // 1 - semaphore (now may be broken), 0 - portMUX_TYPE

#define LOG_LOCAL_LEVEL ESP_LOG_VERBOSE
#include "esp_log.h"
static const char *TAG = "esp_apptrace";
#if ESP_APPTRACE_PRINT_LOCK != ESP_APPTRACE_PRINT_LOCK_NONE
#define ESP_APPTRACE_LOG( format, ... )   \
    do { \
        esp_apptrace_log_lock(); \
        ets_printf(format, ##__VA_ARGS__); \
        esp_apptrace_log_unlock(); \
    } while(0)
#else
#define ESP_APPTRACE_LOG( format, ... )   \
    do { \
        ets_printf(format, ##__VA_ARGS__); \
    } while(0)
#endif

#define ESP_APPTRACE_LOG_LEV( _L_, level, format, ... )   \
    do { \
        if (LOG_LOCAL_LEVEL >= level) { \
            ESP_APPTRACE_LOG(LOG_FORMAT(_L_, format), esp_log_early_timestamp(), TAG, ##__VA_ARGS__); \
        } \
    } while(0)

#define ESP_APPTRACE_LOGE( format, ... )  ESP_APPTRACE_LOG_LEV(E, ESP_LOG_ERROR, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGW( format, ... )  ESP_APPTRACE_LOG_LEV(W, ESP_LOG_WARN, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGI( format, ... )  ESP_APPTRACE_LOG_LEV(I, ESP_LOG_INFO, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGD( format, ... )  ESP_APPTRACE_LOG_LEV(D, ESP_LOG_DEBUG, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGV( format, ... )  ESP_APPTRACE_LOG_LEV(V, ESP_LOG_VERBOSE, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGO( format, ... )  ESP_APPTRACE_LOG_LEV(E, ESP_LOG_NONE, format, ##__VA_ARGS__)

#define ESP_APPTRACE_CPUTICKS2US(_t_)     ((_t_)/(XT_CLOCK_FREQ/1000000))
// TODO: move these (and the same definitions in trax.c) to dport_reg.h
#define TRACEMEM_MUX_PROBLK0_APPBLK1    0
#define TRACEMEM_MUX_BLK0_ONLY          1
#define TRACEMEM_MUX_BLK1_ONLY          2
#define TRACEMEM_MUX_PROBLK1_APPBLK0    3

// TRAX is disabled, so we use its registers for our own purposes
// | 31..24 (unused) | 23 (host_connect) | 22..15 (block_id) | 14..0 (block_len) |
#define ESP_APPTRACE_TRAX_CTRL_REG      ERI_TRAX_DELAYCNT
#define ESP_APPTRACE_TRAX_STAT_REG      ERI_TRAX_TRIGGERPC

#define ESP_APPTRACE_TRAX_BLOCK_LEN_MSK      0x7FFFUL
#define ESP_APPTRACE_TRAX_BLOCK_LEN(_l_)     ((_l_) & ESP_APPTRACE_TRAX_BLOCK_LEN_MSK)
#define ESP_APPTRACE_TRAX_BLOCK_LEN_GET(_v_) ((_v_) & ESP_APPTRACE_TRAX_BLOCK_LEN_MSK)
#define ESP_APPTRACE_TRAX_BLOCK_ID_MSK       0xFFUL
#define ESP_APPTRACE_TRAX_BLOCK_ID(_id_)     (((_id_) & ESP_APPTRACE_TRAX_BLOCK_ID_MSK) << 15)
#define ESP_APPTRACE_TRAX_BLOCK_ID_GET(_v_)  (((_v_) >> 15) & ESP_APPTRACE_TRAX_BLOCK_ID_MSK)
#define ESP_APPTRACE_TRAX_HOST_CONNECT       (1 << 23)

static volatile uint8_t *s_trax_blocks[] = {
    (volatile uint8_t *) 0x3FFFC000,
    (volatile uint8_t *) 0x3FFF8000
};

#define ESP_APPTRACE_TRAX_BLOCKS_NUM    (sizeof(s_trax_blocks)/sizeof(s_trax_blocks[0]))

//#define ESP_APPTRACE_TRAX_BUFFER_SIZE (ESP_APPTRACE_TRAX_BLOCK_SIZE/4)

#define ESP_APPTRACE_TRAX_INBLOCK_START 0 //(ESP_APPTRACE_TRAX_BLOCK_ID_MSK - 4)

#define ESP_APPTRACE_TRAX_INBLOCK_MARKER_PTR_GET()  (&s_trace_buf.trax.state.markers[s_trace_buf.trax.state.in_block % 2])
#define ESP_APPTRACE_TRAX_INBLOCK_GET()             (&s_trace_buf.trax.blocks[s_trace_buf.trax.state.in_block % 2])
#if ESP_APPTRACE_DEBUG_STATS_ENABLE == 1
/** keeps info about an apptrace API (write/get buffer) caller and internal module data related to that call
 * NOTE: used for module debug purposes; currently this functionality is partially broken,
 * but can be useful in the future
 */
typedef struct {
    uint32_t hnd;        // task/ISR handle
    uint32_t ts;         // timestamp
    uint32_t stamp;      // test (user) trace buffer stamp
    uint32_t in_block;   // TRAX input block ID
    uint32_t eri_len[2]; // contents of ERI control register upon entry to / exit from API routine
    uint32_t wr_err;     // number of trace write errors
} esp_trace_buffer_wr_hitem_t;

/** apptrace API call history. The history is organized as a ring buffer */
typedef struct {
    uint32_t hist_rd; // the first history entry index
    uint32_t hist_wr; // the last history entry index
    esp_trace_buffer_wr_hitem_t hist[ESP_APPTRACE_BUF_HISTORY_DEPTH]; // history data
} esp_trace_buffer_wr_stats_t;

/** trace module stats */
typedef struct {
    esp_trace_buffer_wr_stats_t wr;
} esp_trace_buffer_stats_t;
#endif
/** Trace data header. Every user data chunk is prepended with this header.
 * The user allocates a block with esp_apptrace_buffer_get and then fills it with data;
 * in a multithreading environment a task can get a buffer and then be interrupted,
 * so it is possible that the user data are incomplete when the TRAX memory block is exposed to the host.
 * In this case the host SW will see that wr_sz < block_sz and will report an error.
 */
typedef struct {
    uint16_t block_sz; // size of the block allocated for user data
    uint16_t wr_sz;    // size of actually written data
} esp_tracedata_hdr_t;
/** TRAX HW transport state */
typedef struct {
    uint32_t in_block;                              // input block ID
    uint32_t markers[ESP_APPTRACE_TRAX_BLOCKS_NUM]; // block filling level markers
#if ESP_APPTRACE_DEBUG_STATS_ENABLE == 1
    esp_trace_buffer_stats_t stats;                 // stats
#endif
} esp_apptrace_trax_state_t;

/** memory block parameters */
typedef struct {
    uint8_t *start; // start address
    uint32_t sz;    // size
} esp_apptrace_mem_block_t;

/** TRAX HW transport data */
typedef struct {
    volatile esp_apptrace_trax_state_t state;                      // state
    esp_apptrace_mem_block_t blocks[ESP_APPTRACE_TRAX_BLOCKS_NUM]; // memory blocks
} esp_apptrace_trax_data_t;

/** tracing module synchronization lock */
typedef struct {
    volatile unsigned int irq_stat; // local (on 1 CPU) IRQ state
    portMUX_TYPE portmux;           // mux for synchronization
} esp_apptrace_lock_t;

#define ESP_APPTRACE_MUX_GET(_m_) (&(_m_)->portmux)

/** tracing module internal data */
typedef struct {
#if ESP_APPTRACE_USE_LOCK_SEM == 1
    SemaphoreHandle_t lock;        // sync lock
#else
    esp_apptrace_lock_t lock;      // sync lock
#endif
    uint8_t inited;                // module initialization state flag
    esp_apptrace_trax_data_t trax; // TRAX HW transport data
} esp_apptrace_buffer_t;

/** waiting timeout data */
typedef struct {
    uint32_t start; // waiting start (in ticks)
    uint32_t tmo;   // timeout (in us)
} esp_apptrace_tmo_t;

static esp_apptrace_buffer_t s_trace_buf;

#if ESP_APPTRACE_PRINT_LOCK == ESP_APPTRACE_PRINT_LOCK_SEM
static SemaphoreHandle_t s_log_lock;
#elif ESP_APPTRACE_PRINT_LOCK == ESP_APPTRACE_PRINT_LOCK_MUX
static esp_apptrace_lock_t s_log_lock;
#endif
static inline void esp_apptrace_tmo_init(esp_apptrace_tmo_t *tmo, uint32_t user_tmo)
{
    tmo->start = xthal_get_ccount();
    tmo->tmo = user_tmo;
}

static esp_err_t esp_apptrace_tmo_check(esp_apptrace_tmo_t *tmo)
{
    unsigned cur, elapsed;

    if (tmo->tmo != ESP_APPTRACE_TMO_INFINITE) {
        cur = xthal_get_ccount();
        if (tmo->start <= cur) {
            elapsed = cur - tmo->start;
        } else {
            // CPU cycle counter wrapped around
            elapsed = 0xFFFFFFFF - tmo->start + cur;
        }
        if (ESP_APPTRACE_CPUTICKS2US(elapsed) >= tmo->tmo) {
            return ESP_ERR_TIMEOUT;
        }
    }
    return ESP_OK;
}
#if ESP_APPTRACE_PRINT_LOCK == ESP_APPTRACE_PRINT_LOCK_MUX || ESP_APPTRACE_USE_LOCK_SEM == 0
static inline void esp_apptrace_mux_init(esp_apptrace_lock_t *mux)
{
    ESP_APPTRACE_MUX_GET(mux)->mux = portMUX_FREE_VAL;
    mux->irq_stat = 0;
}

static esp_err_t esp_apptrace_lock_take(esp_apptrace_lock_t *mux, uint32_t tmo)
{
    uint32_t res = ~portMUX_FREE_VAL;
    esp_apptrace_tmo_t sleeping_tmo;

    esp_apptrace_tmo_init(&sleeping_tmo, tmo);
    while (1) {
        res = (xPortGetCoreID() << portMUX_VAL_SHIFT) | portMUX_MAGIC_VAL;
        // first disable IRQs on this CPU, this will prevent the current task from being
        // preempted by a higher prio task; otherwise a deadlock can happen:
        // a lower prio task takes the mux and is then preempted by a higher prio one
        // which also tries to take the mux with an INFINITE timeout
        unsigned int irq_stat = portENTER_CRITICAL_NESTED();
        // now try to lock the mux
        uxPortCompareSet(&ESP_APPTRACE_MUX_GET(mux)->mux, portMUX_FREE_VAL, &res);
        if (res == portMUX_FREE_VAL) {
            // do not enable IRQs, we will hold them disabled until the mux is unlocked.
            // we do not need to flush the cache region for mux->irq_stat because it is used
            // to hold and restore the IRQ state only for the CPU which took the mux; other CPUs will not use this value
            mux->irq_stat = irq_stat;
            break;
        }
        // the mux is locked by another task/ISR: enable IRQs and let others work
        portEXIT_CRITICAL_NESTED(irq_stat);
        if (!xPortInIsrContext()) {
            portYIELD();
        }
        int err = esp_apptrace_tmo_check(&sleeping_tmo);
        if (err != ESP_OK) {
            return err;
        }
    }
    return ESP_OK;
}

esp_err_t esp_apptrace_mux_give(esp_apptrace_lock_t *mux)
{
    esp_err_t ret = ESP_OK;
    uint32_t res = 0;
    unsigned int irq_stat;

    res = portMUX_FREE_VAL;
    // first of all save a copy of the IRQ status for this locker, because uxPortCompareSet will unlock the mux
    // and tasks/ISRs from the other core can overwrite mux->irq_stat
    irq_stat = mux->irq_stat;
    uxPortCompareSet(&ESP_APPTRACE_MUX_GET(mux)->mux, (xPortGetCoreID() << portMUX_VAL_SHIFT) | portMUX_MAGIC_VAL, &res);
    // enable local interrupts
    portEXIT_CRITICAL_NESTED(irq_stat);

    if ( ((res & portMUX_VAL_MASK) >> portMUX_VAL_SHIFT) == xPortGetCoreID() ) {
        // nothing to do: we released the mux we owned
    } else if ( res == portMUX_FREE_VAL ) {
        ret = ESP_FAIL; // mux was not locked, should never get here
    } else {
        ret = ESP_FAIL; // mux is owned by the other core, should never get here
    }
    return ret;
}
#endif
static inline esp_err_t esp_apptrace_log_init()
{
#if ESP_APPTRACE_PRINT_LOCK == ESP_APPTRACE_PRINT_LOCK_SEM
    s_log_lock = xSemaphoreCreateBinary();
    if (!s_log_lock) {
        ets_printf("%s: Failed to create print lock sem!", TAG);
        return ESP_FAIL;
    }
    xSemaphoreGive(s_log_lock);
#elif ESP_APPTRACE_PRINT_LOCK == ESP_APPTRACE_PRINT_LOCK_MUX
    esp_apptrace_mux_init(&s_log_lock);
#endif
    return ESP_OK;
}

static inline void esp_apptrace_log_cleanup()
{
#if ESP_APPTRACE_PRINT_LOCK == ESP_APPTRACE_PRINT_LOCK_SEM
    vSemaphoreDelete(s_log_lock);
#endif
}

static inline int esp_apptrace_log_lock()
{
#if ESP_APPTRACE_PRINT_LOCK == ESP_APPTRACE_PRINT_LOCK_SEM
    BaseType_t ret;
    if (xPortInIsrContext()) {
        ret = xSemaphoreTakeFromISR(s_log_lock, NULL);
    } else {
        ret = xSemaphoreTake(s_log_lock, portMAX_DELAY);
    }
    return ret;
#elif ESP_APPTRACE_PRINT_LOCK == ESP_APPTRACE_PRINT_LOCK_MUX
    int ret = esp_apptrace_lock_take(&s_log_lock, ESP_APPTRACE_TMO_INFINITE);
    return ret;
#endif
    return 0;
}

static inline void esp_apptrace_log_unlock()
{
#if ESP_APPTRACE_PRINT_LOCK == ESP_APPTRACE_PRINT_LOCK_SEM
    if (xPortInIsrContext()) {
        xSemaphoreGiveFromISR(s_log_lock, NULL);
    } else {
        xSemaphoreGive(s_log_lock);
    }
#elif ESP_APPTRACE_PRINT_LOCK == ESP_APPTRACE_PRINT_LOCK_MUX
    esp_apptrace_mux_give(&s_log_lock);
#endif
}
esp_err_t esp_apptrace_lock_init()
{
#if ESP_APPTRACE_USE_LOCK_SEM == 1
    s_trace_buf.lock = xSemaphoreCreateBinary();
    if (!s_trace_buf.lock) {
        ESP_APPTRACE_LOGE("Failed to create lock!");
        return ESP_FAIL;
    }
    xSemaphoreGive(s_trace_buf.lock);
#else
    esp_apptrace_mux_init(&s_trace_buf.lock);
#endif
    return ESP_OK;
}

esp_err_t esp_apptrace_lock_cleanup()
{
#if ESP_APPTRACE_USE_LOCK_SEM == 1
    vSemaphoreDelete(s_trace_buf.lock);
#endif
    return ESP_OK;
}

esp_err_t esp_apptrace_lock(uint32_t *tmo)
{
    unsigned cur, elapsed, start = xthal_get_ccount();

#if ESP_APPTRACE_USE_LOCK_SEM == 1
    BaseType_t ret;
    if (xPortInIsrContext()) {
        ret = xSemaphoreTakeFromISR(s_trace_buf.lock, NULL);
    } else {
        ret = xSemaphoreTake(s_trace_buf.lock, portTICK_PERIOD_MS * (*tmo) / 1000);
    }
    if (ret != pdTRUE) {
        return ESP_FAIL;
    }
#else
    esp_err_t ret = esp_apptrace_lock_take(&s_trace_buf.lock, *tmo);
    if (ret != ESP_OK) {
        return ESP_FAIL;
    }
#endif
    // decrease tmo by the actual waiting time
    cur = xthal_get_ccount();
    if (start <= cur) {
        elapsed = cur - start;
    } else {
        // CPU cycle counter wrapped around
        elapsed = ULONG_MAX - start + cur;
    }
    if (ESP_APPTRACE_CPUTICKS2US(elapsed) > *tmo) {
        *tmo = 0;
    } else {
        *tmo -= ESP_APPTRACE_CPUTICKS2US(elapsed);
    }
    return ESP_OK;
}

esp_err_t esp_apptrace_unlock()
{
    esp_err_t ret = ESP_OK;
#if ESP_APPTRACE_USE_LOCK_SEM == 1
    if (xPortInIsrContext()) {
        xSemaphoreGiveFromISR(s_trace_buf.lock, NULL);
    } else {
        xSemaphoreGive(s_trace_buf.lock);
    }
#else
    ret = esp_apptrace_mux_give(&s_trace_buf.lock);
#endif
    return ret;
}
#if CONFIG_ESP32_APPTRACE_DEST_TRAX
static void esp_apptrace_trax_init()
{
    // stop trace, if any (on the current CPU)
    eri_write(ERI_TRAX_TRAXCTRL, TRAXCTRL_TRSTP);
    eri_write(ERI_TRAX_TRAXCTRL, TRAXCTRL_TMEN);
    eri_write(ESP_APPTRACE_TRAX_CTRL_REG, ESP_APPTRACE_TRAX_BLOCK_ID(ESP_APPTRACE_TRAX_INBLOCK_START));
    eri_write(ESP_APPTRACE_TRAX_STAT_REG, 0);
    ESP_APPTRACE_LOGI("Initialized TRAX on CPU%d", xPortGetCoreID());
}

// assumed to be protected by the caller from multi-core/thread access
static esp_err_t esp_apptrace_trax_block_switch()
{
    int prev_block_num = s_trace_buf.trax.state.in_block % 2;
    int new_block_num = prev_block_num ? (0) : (1);
    int res = ESP_OK;
    extern uint32_t __esp_apptrace_trax_eri_updated;

    // indicate to the host that we are about to update.
    // this is used only to put the CPU into streaming mode at tracing startup:
    // before starting streaming the host can halt us after we have read ESP_APPTRACE_TRAX_CTRL_REG but before we have updated it.
    // HACK: in this case the host will set a breakpoint just after the ESP_APPTRACE_TRAX_CTRL_REG update,
    // here we publish the address to put the bp at.
    // enter ERI update critical section
    eri_write(ESP_APPTRACE_TRAX_STAT_REG, (uint32_t)&__esp_apptrace_trax_eri_updated);
    uint32_t ctrl_reg = eri_read(ESP_APPTRACE_TRAX_CTRL_REG);
#if ESP_APPTRACE_DEBUG_STATS_ENABLE == 1
    if (s_trace_buf.state.stats.wr.hist_wr < ESP_APPTRACE_BUF_HISTORY_DEPTH) {
        esp_trace_buffer_wr_hitem_t *hi = (esp_trace_buffer_wr_hitem_t *)&s_trace_buf.state.stats.wr.hist[s_trace_buf.state.stats.wr.hist_wr - 1];
        hi->eri_len[1] = ctrl_reg;
    }
#endif
    uint32_t host_connected = ESP_APPTRACE_TRAX_HOST_CONNECT & ctrl_reg;
    if (host_connected) {
        uint32_t acked_block = ESP_APPTRACE_TRAX_BLOCK_ID_GET(ctrl_reg);
        uint32_t host_to_read = ESP_APPTRACE_TRAX_BLOCK_LEN_GET(ctrl_reg);
        if (host_to_read != 0 || acked_block != (s_trace_buf.trax.state.in_block & ESP_APPTRACE_TRAX_BLOCK_ID_MSK)) {
            // ESP_APPTRACE_LOGE("HC[%d]: Can not switch %x %d %x %x/%lx", xPortGetCoreID(), ctrl_reg, host_to_read, acked_block,
            //     s_trace_buf.trax.state.in_block & ESP_APPTRACE_TRAX_BLOCK_ID_MSK, s_trace_buf.trax.state.in_block);
            res = ESP_ERR_NO_MEM;
            goto _on_func_exit;
        }
    }
    s_trace_buf.trax.state.markers[new_block_num] = 0;
    // switch to the new block
    s_trace_buf.trax.state.in_block++;

    WRITE_PERI_REG(DPORT_TRACEMEM_MUX_MODE_REG, new_block_num ? TRACEMEM_MUX_BLK0_ONLY : TRACEMEM_MUX_BLK1_ONLY);
    eri_write(ESP_APPTRACE_TRAX_CTRL_REG, ESP_APPTRACE_TRAX_BLOCK_ID(s_trace_buf.trax.state.in_block) |
              host_connected | ESP_APPTRACE_TRAX_BLOCK_LEN(s_trace_buf.trax.state.markers[prev_block_num]));
_on_func_exit:
    // exit ERI update critical section
    eri_write(ESP_APPTRACE_TRAX_STAT_REG, 0x0);
    asm volatile (
        "    .global    __esp_apptrace_trax_eri_updated\n"
        "__esp_apptrace_trax_eri_updated:\n"); // the host will set a bp here to resolve the collision at streaming start
    return res;
}
static esp_err_t esp_apptrace_trax_block_switch_waitus(uint32_t tmo)
{
    int res;
    esp_apptrace_tmo_t sleeping_tmo;

    esp_apptrace_tmo_init(&sleeping_tmo, tmo);
    while ((res = esp_apptrace_trax_block_switch()) != ESP_OK) {
        res = esp_apptrace_tmo_check(&sleeping_tmo);
        if (res != ESP_OK) {
            break;
        }
    }
    return res;
}
static uint8_t *esp_apptrace_trax_get_buffer(size_t size, uint32_t *tmo)
{
    uint8_t *buf_ptr = NULL;
    volatile uint32_t *cur_block_marker;
    esp_apptrace_mem_block_t *cur_block;

    int res = esp_apptrace_lock(tmo);
    if (res != ESP_OK) {
        return NULL;
    }
#if ESP_APPTRACE_DEBUG_STATS_ENABLE == 1
    // NOTE: this stats code is currently partially broken ('buf' is not defined in this scope),
    // see the note on esp_trace_buffer_wr_hitem_t above
    esp_trace_buffer_wr_hitem_t *hi = NULL;
    if (s_trace_buf.state.stats.wr.hist_wr < ESP_APPTRACE_BUF_HISTORY_DEPTH) {
        hi = (esp_trace_buffer_wr_hitem_t *)&s_trace_buf.state.stats.wr.hist[s_trace_buf.state.stats.wr.hist_wr++];
        hi->hnd = *(uint32_t *)(buf + 0);
        hi->ts = *(uint32_t *)(buf + sizeof(uint32_t));
        hi->stamp = *(buf + 2 * sizeof(uint32_t));
        hi->in_block = s_trace_buf.state.in_block;
        hi->wr_err = 0;
        hi->eri_len[0] = eri_read(ESP_APPTRACE_TRAX_CTRL_REG);
        if (s_trace_buf.state.stats.wr.hist_wr == ESP_APPTRACE_BUF_HISTORY_DEPTH) {
            s_trace_buf.state.stats.wr.hist_wr = 0;
        }
        if (s_trace_buf.state.stats.wr.hist_wr == s_trace_buf.state.stats.wr.hist_rd) {
            s_trace_buf.state.stats.wr.hist_rd++;
            if (s_trace_buf.state.stats.wr.hist_rd == ESP_APPTRACE_BUF_HISTORY_DEPTH) {
                s_trace_buf.state.stats.wr.hist_rd = 0;
            }
        }
    }
#endif
    cur_block_marker = ESP_APPTRACE_TRAX_INBLOCK_MARKER_PTR_GET();
    cur_block = ESP_APPTRACE_TRAX_INBLOCK_GET();

    if (*cur_block_marker + size + sizeof(esp_tracedata_hdr_t) >= cur_block->sz) {
        // flush data; we can not unlock apptrace until we have a buffer for all of the user data,
        // otherwise other tasks/ISRs can get control and write their data between the chunks of this data
        res = esp_apptrace_trax_block_switch_waitus(/*size + sizeof(esp_tracedata_hdr_t),*/ *tmo);
        if (res != ESP_OK) {
            if (esp_apptrace_unlock() != ESP_OK) {
                ESP_APPTRACE_LOGE("Failed to unlock apptrace data!"); // there is a bug, should never get here
            }
            return NULL;
        }
        // we switched to a new block, update the TRAX block pointers
        cur_block_marker = ESP_APPTRACE_TRAX_INBLOCK_MARKER_PTR_GET();
        cur_block = ESP_APPTRACE_TRAX_INBLOCK_GET();
    }

    buf_ptr = cur_block->start + *cur_block_marker;
    ((esp_tracedata_hdr_t *)buf_ptr)->block_sz = size;
    ((esp_tracedata_hdr_t *)buf_ptr)->wr_sz = 0;
    *cur_block_marker += size + sizeof(esp_tracedata_hdr_t);

    // now we can safely unlock apptrace to allow other tasks/ISRs to get their own buffers and write their data
    if (esp_apptrace_unlock() != ESP_OK) {
        ESP_APPTRACE_LOGE("Failed to unlock apptrace data!"); // there is a bug, should never get here
    }
    return buf_ptr + sizeof(esp_tracedata_hdr_t);
}
static esp_err_t esp_apptrace_trax_put_buffer(uint8_t *ptr, uint32_t *tmo)
{
    int res = ESP_OK;
    esp_tracedata_hdr_t *hdr = (esp_tracedata_hdr_t *)(ptr - sizeof(esp_tracedata_hdr_t));

    // update the written size
    hdr->wr_sz = hdr->block_sz;

    // TODO: mark the block as busy in order not to re-use it for other tracing calls until it is completely written.
    // TODO: avoid the potential situation when all memory is consumed by low prio tasks which can not complete writing
    // due to higher prio tasks, and the latter can not allocate buffers at all.
    // This abnormal situation can be detected on the host, which will receive only incomplete buffers.
    // Workaround: use our own memcpy which kicks off dead tracing calls.
    return res;
}
static esp_err_t esp_apptrace_trax_flush(uint32_t min_sz, uint32_t tmo)
{
    volatile uint32_t *in_block_marker;
    int res = ESP_OK;

    in_block_marker = ESP_APPTRACE_TRAX_INBLOCK_MARKER_PTR_GET();
    if (*in_block_marker > min_sz) {
        ESP_APPTRACE_LOGD("Wait until block switch for %u us", tmo);
        res = esp_apptrace_trax_block_switch_waitus(/*0 - query any size,*/ tmo);
        if (res != ESP_OK) {
            ESP_APPTRACE_LOGE("Failed to switch to another block");
            return res;
        }
        ESP_APPTRACE_LOGD("Flushed last block %u bytes", *in_block_marker);
        *in_block_marker = 0;
    }
    return res;
}
static esp_err_t esp_apptrace_trax_dest_init()
{
    for (int i = 0; i < ESP_APPTRACE_TRAX_BLOCKS_NUM; i++) {
        s_trace_buf.trax.blocks[i].start = (uint8_t *)s_trax_blocks[i];
        s_trace_buf.trax.blocks[i].sz = ESP_APPTRACE_TRAX_BLOCK_SIZE;
        s_trace_buf.trax.state.markers[i] = 0;
    }
    s_trace_buf.trax.state.in_block = ESP_APPTRACE_TRAX_INBLOCK_START;

    WRITE_PERI_REG(DPORT_PRO_TRACEMEM_ENA_REG, DPORT_PRO_TRACEMEM_ENA_M);
#if CONFIG_FREERTOS_UNICORE == 0
    WRITE_PERI_REG(DPORT_APP_TRACEMEM_ENA_REG, DPORT_APP_TRACEMEM_ENA_M);
#endif
    // expose block 1 to the host, block 0 is the current trace input buffer
    WRITE_PERI_REG(DPORT_TRACEMEM_MUX_MODE_REG, TRACEMEM_MUX_BLK1_ONLY);

    return ESP_OK;
}
#endif
esp_err_t esp_apptrace_init()
{
    int res;

    if (!s_trace_buf.inited) {
        res = esp_apptrace_log_init();
        if (res != ESP_OK) {
            ets_printf("%s: Failed to init log lock (%d)!", TAG, res);
            return res;
        }
        //memset(&s_trace_buf, 0, sizeof(s_trace_buf));
        res = esp_apptrace_lock_init();
        if (res != ESP_OK) {
            ESP_APPTRACE_LOGE("Failed to init lock (%d)!", res);
            esp_apptrace_log_cleanup();
            return res;
        }
#if CONFIG_ESP32_APPTRACE_DEST_TRAX
        res = esp_apptrace_trax_dest_init();
        if (res != ESP_OK) {
            ESP_APPTRACE_LOGE("Failed to init TRAX dest data (%d)!", res);
            esp_apptrace_lock_cleanup();
            esp_apptrace_log_cleanup();
            return res;
        }
#endif
    }
#if CONFIG_ESP32_APPTRACE_DEST_TRAX
    // init TRAX on this CPU
    esp_apptrace_trax_init();
#endif
    s_trace_buf.inited |= 1 << xPortGetCoreID(); // global and this-CPU-specific data are inited

    return ESP_OK;
}
esp_err_t esp_apptrace_write(esp_apptrace_dest_t dest, void *data, size_t size, uint32_t user_tmo)
{
    uint8_t *ptr = NULL;
    uint32_t tmo = user_tmo;
    //TODO: use a ptr to a HW transport iface struct
    uint8_t *(*apptrace_get_buffer)(size_t, uint32_t *);
    esp_err_t (*apptrace_put_buffer)(uint8_t *, uint32_t *);

    if (dest == ESP_APPTRACE_DEST_TRAX) {
#if CONFIG_ESP32_APPTRACE_DEST_TRAX
        apptrace_get_buffer = esp_apptrace_trax_get_buffer;
        apptrace_put_buffer = esp_apptrace_trax_put_buffer;
#else
        ESP_APPTRACE_LOGE("Application tracing via TRAX is disabled in menuconfig!");
        return ESP_ERR_NOT_SUPPORTED;
#endif
    } else {
        ESP_APPTRACE_LOGE("Trace destinations other than TRAX are not supported yet!");
        return ESP_ERR_NOT_SUPPORTED;
    }

    ptr = apptrace_get_buffer(size, &tmo);
    if (ptr == NULL) {
        //ESP_APPTRACE_LOGE("Failed to get buffer!");
        return ESP_ERR_NO_MEM;
    }

    // we can actually be suspended here by higher prio tasks/ISRs
    //TODO: use our own memcpy with a dead-trace-calls kick-off algo and a tmo expiration check
    memcpy(ptr, data, size);
    // now indicate that this buffer is ready to be sent off to the host
    return apptrace_put_buffer(ptr, &tmo);
}
int esp_apptrace_vprintf_to(esp_apptrace_dest_t dest, uint32_t user_tmo, const char *fmt, va_list ap)
{
    uint16_t nargs = 0;
    uint8_t *pout, *p = (uint8_t *)fmt;
    uint32_t tmo = user_tmo;
    //TODO: use a ptr to a HW transport iface struct
    uint8_t *(*apptrace_get_buffer)(size_t, uint32_t *);
    esp_err_t (*apptrace_put_buffer)(uint8_t *, uint32_t *);

    if (dest == ESP_APPTRACE_DEST_TRAX) {
#if CONFIG_ESP32_APPTRACE_DEST_TRAX
        apptrace_get_buffer = esp_apptrace_trax_get_buffer;
        apptrace_put_buffer = esp_apptrace_trax_put_buffer;
#else
        ESP_APPTRACE_LOGE("Application tracing via TRAX is disabled in menuconfig!");
        return ESP_ERR_NOT_SUPPORTED;
#endif
    } else {
        ESP_APPTRACE_LOGE("Trace destinations other than TRAX are not supported yet!");
        return ESP_ERR_NOT_SUPPORTED;
    }

    // count format specifiers to know how many 32-bit args to store
    while ((p = (uint8_t *)strchr((char *)p, '%')) && nargs < ESP_APPTRACE_MAX_VPRINTF_ARGS) {
        p++;
        if (*p != '%' && *p != 0) {
            nargs++;
        }
    }
    if (p) {
        ESP_APPTRACE_LOGE("Failed to store all printf args!");
    }

    pout = apptrace_get_buffer(1 + sizeof(char *) + nargs * sizeof(uint32_t), &tmo);
    if (pout == NULL) {
        ESP_APPTRACE_LOGE("Failed to get buffer!");
        return -1;
    }
    p = pout;
    *pout = nargs; // 1 byte: number of args
    pout++;
    *(const char **)pout = fmt; // sizeof(char *) bytes: format string address
    pout += sizeof(char *);
    while (nargs-- > 0) {
        uint32_t arg = va_arg(ap, uint32_t);
        *(uint32_t *)pout = arg;
        pout += sizeof(uint32_t);
    }
    int ret = apptrace_put_buffer(p, &tmo);
    if (ret != ESP_OK) {
        ESP_APPTRACE_LOGE("Failed to put printf buf (%d)!", ret);
        return -1;
    }
    return (pout - p);
}
int esp_apptrace_vprintf(const char *fmt, va_list ap)
{
    return esp_apptrace_vprintf_to(ESP_APPTRACE_DEST_TRAX, /*ESP_APPTRACE_TMO_INFINITE*/0, fmt, ap);
}
uint8_t *esp_apptrace_buffer_get(esp_apptrace_dest_t dest, size_t size, uint32_t user_tmo)
{
    uint32_t tmo = user_tmo;
    //TODO: use a ptr to a HW transport iface struct
    uint8_t *(*apptrace_get_buffer)(size_t, uint32_t *);

    if (dest == ESP_APPTRACE_DEST_TRAX) {
#if CONFIG_ESP32_APPTRACE_DEST_TRAX
        apptrace_get_buffer = esp_apptrace_trax_get_buffer;
#else
        ESP_APPTRACE_LOGE("Application tracing via TRAX is disabled in menuconfig!");
        return NULL;
#endif
    } else {
        ESP_APPTRACE_LOGE("Trace destinations other than TRAX are not supported yet!");
        return NULL;
    }

    return apptrace_get_buffer(size, &tmo);
}
esp_err_t esp_apptrace_buffer_put(esp_apptrace_dest_t dest, uint8_t *ptr, uint32_t user_tmo)
{
    uint32_t tmo = user_tmo;
    //TODO: use a ptr to a HW transport iface struct
    esp_err_t (*apptrace_put_buffer)(uint8_t *, uint32_t *);

    if (dest == ESP_APPTRACE_DEST_TRAX) {
#if CONFIG_ESP32_APPTRACE_DEST_TRAX
        apptrace_put_buffer = esp_apptrace_trax_put_buffer;
#else
        ESP_APPTRACE_LOGE("Application tracing via TRAX is disabled in menuconfig!");
        return ESP_ERR_NOT_SUPPORTED;
#endif
    } else {
        ESP_APPTRACE_LOGE("Trace destinations other than TRAX are not supported yet!");
        return ESP_ERR_NOT_SUPPORTED;
    }

    return apptrace_put_buffer(ptr, &tmo);
}
esp_err_t esp_apptrace_flush_nolock(esp_apptrace_dest_t dest, uint32_t min_sz, uint32_t tmo)
{
    //TODO: use a ptr to a HW transport iface struct
    esp_err_t (*apptrace_flush)(uint32_t, uint32_t);

    if (dest == ESP_APPTRACE_DEST_TRAX) {
#if CONFIG_ESP32_APPTRACE_DEST_TRAX
        apptrace_flush = esp_apptrace_trax_flush;
#else
        ESP_APPTRACE_LOGE("Application tracing via TRAX is disabled in menuconfig!");
        return ESP_ERR_NOT_SUPPORTED;
#endif
    } else {
        ESP_APPTRACE_LOGE("Trace destinations other than TRAX are not supported yet!");
        return ESP_ERR_NOT_SUPPORTED;
    }

    return apptrace_flush(min_sz, tmo);
}
esp_err_t esp_apptrace_flush(esp_apptrace_dest_t dest, uint32_t tmo)
{
    int res;

    res = esp_apptrace_lock(&tmo);
    if (res != ESP_OK) {
        ESP_APPTRACE_LOGE("Failed to lock apptrace data (%d)!", res);
        return res;
    }

    res = esp_apptrace_flush_nolock(dest, 0, tmo);
    if (res != ESP_OK) {
        ESP_APPTRACE_LOGE("Failed to flush apptrace data (%d)!", res);
    }

    if (esp_apptrace_unlock() != ESP_OK) {
        ESP_APPTRACE_LOGE("Failed to unlock apptrace data (%d)!", res);
    }
    return res;
}
#if ESP_APPTRACE_DEBUG_STATS_ENABLE == 1
void esp_apptrace_print_stats()
{
    uint32_t i;
    uint32_t tmo = ESP_APPTRACE_TMO_INFINITE;

    esp_apptrace_lock(&tmo);
    for (i = s_trace_buf.state.stats.wr.hist_rd; (i < s_trace_buf.state.stats.wr.hist_wr) && (i < ESP_APPTRACE_BUF_HISTORY_DEPTH); i++) {
        esp_trace_buffer_wr_hitem_t *hi = (esp_trace_buffer_wr_hitem_t *)&s_trace_buf.state.stats.wr.hist[i];
        ESP_APPTRACE_LOGO("hist[%u] = {%x, %x}", i, hi->hnd, hi->ts);
    }
    if (i == ESP_APPTRACE_BUF_HISTORY_DEPTH) {
        for (i = 0; i < s_trace_buf.state.stats.wr.hist_wr; i++) {
            esp_trace_buffer_wr_hitem_t *hi = (esp_trace_buffer_wr_hitem_t *)&s_trace_buf.state.stats.wr.hist[i];
            ESP_APPTRACE_LOGO("hist[%u] = {%x, %x}", i, hi->hnd, hi->ts);
        }
    }
    esp_apptrace_unlock();
}
#endif
#endif