app_trace_membufs_proto.c
/*
 * SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <sys/param.h>
#include <string.h>
#include <assert.h>
#include "sdkconfig.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_app_trace_membufs_proto.h"

/** Trace data header. Every user data chunk is prepended with this header.
 * The user allocates a block with esp_apptrace_buffer_get() and then fills it with data.
 * In a multithreaded environment a task can get a buffer and then be interrupted,
 * so the user data can still be incomplete when the memory block is exposed to the host.
 * In that case the host SW will see that wr_sz < block_sz and will report an error.
 */
typedef struct {
#if CONFIG_APPTRACE_SV_ENABLE
    uint8_t block_sz; // size of the block allocated for user data
    uint8_t wr_sz;    // size of the data actually written
#else
    uint16_t block_sz; // size of the block allocated for user data
    uint16_t wr_sz;    // size of the data actually written
#endif
} esp_tracedata_hdr_t;

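/*
 * Resulting layout of a user block in the up stream:
 *
 *   | esp_tracedata_hdr_t | user payload |
 *
 * In non-SystemView mode block_sz also carries the writer's core ID in its
 * top bit (see the ESP_APPTRACE_USR_BLOCK_* macros below); wr_sz reaches
 * block_sz only once the write has completed.
 */
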
/** Header of a data block sent by the host to the target. */
typedef struct {
    uint16_t block_sz; // number of host data bytes that follow this header
} esp_hostdata_hdr_t;

#if CONFIG_APPTRACE_SV_ENABLE
#define ESP_APPTRACE_USR_BLOCK_CORE(_cid_)        (0)
#define ESP_APPTRACE_USR_BLOCK_LEN(_v_)           (_v_)
#define ESP_APPTRACE_USR_DATA_LEN_MAX(_hw_data_)  255UL
#else
#define ESP_APPTRACE_USR_BLOCK_CORE(_cid_)        ((_cid_) << 15)
#define ESP_APPTRACE_USR_BLOCK_LEN(_v_)           (~(1 << 15) & (_v_))
#define ESP_APPTRACE_USR_DATA_LEN_MAX(_hw_data_)  (ESP_APPTRACE_INBLOCK(_hw_data_)->sz - sizeof(esp_tracedata_hdr_t))
#endif
#define ESP_APPTRACE_USR_BLOCK_RAW_SZ(_s_)        ((_s_) + sizeof(esp_tracedata_hdr_t))

#define ESP_APPTRACE_INBLOCK_MARKER(_hw_data_)          ((_hw_data_)->state.markers[(_hw_data_)->state.in_block % 2])
#define ESP_APPTRACE_INBLOCK_MARKER_UPD(_hw_data_, _v_) do {(_hw_data_)->state.markers[(_hw_data_)->state.in_block % 2] += (_v_);} while (0)
#define ESP_APPTRACE_INBLOCK(_hw_data_)                 (&(_hw_data_)->blocks[(_hw_data_)->state.in_block % 2])

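/*
 * Worked example (non-SystemView mode): a 10-byte user write on core 1 yields
 * block_sz = ESP_APPTRACE_USR_BLOCK_CORE(1) | 10 = 0x800A and occupies
 * ESP_APPTRACE_USR_BLOCK_RAW_SZ(10) = 10 + sizeof(esp_tracedata_hdr_t) = 14
 * bytes of the current block; the host recovers the payload length with
 * ESP_APPTRACE_USR_BLOCK_LEN(0x800A) = 10.
 */
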
static const char *TAG = "esp_apptrace";

static uint32_t esp_apptrace_membufs_down_buffer_write_nolock(esp_apptrace_membufs_proto_data_t *proto, uint8_t *data, uint32_t size);

esp_err_t esp_apptrace_membufs_init(esp_apptrace_membufs_proto_data_t *proto, const esp_apptrace_mem_block_t blocks_cfg[2])
{
    // the down buffer is disabled by default
    esp_apptrace_rb_init(&proto->rb_down, NULL, 0);
    // membufs proto init
    for (unsigned i = 0; i < 2; i++) {
        proto->blocks[i].start = blocks_cfg[i].start;
        proto->blocks[i].sz = blocks_cfg[i].sz;
        proto->state.markers[i] = 0;
    }
    proto->state.in_block = 0;
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
    esp_apptrace_rb_init(&proto->rb_pend, proto->pending_data,
                         sizeof(proto->pending_data));
#endif
    return ESP_OK;
}

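/*
 * A minimal initialization sketch (hypothetical caller; a real transport fills
 * blocks_cfg from its hardware-specific memory and sets the proto->hw
 * callbacks before the proto is used):
 *
 *   static uint8_t block0_mem[1024], block1_mem[1024]; // hypothetical storage
 *   esp_apptrace_mem_block_t blocks_cfg[2] = {
 *       { .start = block0_mem, .sz = sizeof(block0_mem) },
 *       { .start = block1_mem, .sz = sizeof(block1_mem) },
 *   };
 *   esp_apptrace_membufs_init(&s_proto_data, blocks_cfg); // s_proto_data: hypothetical
 */
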
void esp_apptrace_membufs_down_buffer_config(esp_apptrace_membufs_proto_data_t *data, uint8_t *buf, uint32_t size)
{
    esp_apptrace_rb_init(&data->rb_down, buf, size);
}

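/*
 * The two memory blocks form a ping-pong pair: the target fills the block
 * selected by state.in_block % 2 while the host drains the other one.
 * state.markers[] track the fill level of each block, and the hw callbacks
 * (swap_start/swap/swap_end) perform the transport-specific handshake.
 */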
// assumed to be protected by the caller from multi-core/multi-thread access
static esp_err_t esp_apptrace_membufs_swap(esp_apptrace_membufs_proto_data_t *proto)
{
    int prev_block_num = proto->state.in_block % 2;
    int new_block_num = prev_block_num ? (0) : (1);
    esp_err_t res = ESP_OK;

    res = proto->hw->swap_start(proto->state.in_block);
    if (res != ESP_OK) {
        return res;
    }

    proto->state.markers[new_block_num] = 0;
    // switch to the new block
    proto->state.in_block++;
    proto->hw->swap(new_block_num);

    // handle data from the host
    esp_hostdata_hdr_t *hdr = (esp_hostdata_hdr_t *)proto->blocks[new_block_num].start;
    // ESP_APPTRACE_LOGV("Host data %d, sz %d @ %p", proto->hw->host_data_pending(), hdr->block_sz, hdr);
    if (proto->hw->host_data_pending() && hdr->block_sz > 0) {
        // TODO: add support for multiple blocks from the host, currently there is no need for that
        uint8_t *p = proto->blocks[new_block_num].start + proto->blocks[new_block_num].sz;
        ESP_APPTRACE_LOGD("Recvd %d bytes from host (@ 0x%x) [%x %x %x %x %x %x %x %x .. %x %x %x %x %x %x %x %x]",
                          hdr->block_sz, proto->blocks[new_block_num].start,
                          *(proto->blocks[new_block_num].start + 0), *(proto->blocks[new_block_num].start + 1),
                          *(proto->blocks[new_block_num].start + 2), *(proto->blocks[new_block_num].start + 3),
                          *(proto->blocks[new_block_num].start + 4), *(proto->blocks[new_block_num].start + 5),
                          *(proto->blocks[new_block_num].start + 6), *(proto->blocks[new_block_num].start + 7),
                          *(p - 8), *(p - 7), *(p - 6), *(p - 5), *(p - 4), *(p - 3), *(p - 2), *(p - 1));
        uint32_t sz = esp_apptrace_membufs_down_buffer_write_nolock(proto, (uint8_t *)(hdr + 1), hdr->block_sz);
        if (sz != hdr->block_sz) {
            ESP_APPTRACE_LOGE("Failed to write %d bytes to down buffer (%d %d)!", hdr->block_sz - sz, hdr->block_sz, sz);
        }
        hdr->block_sz = 0;
    }
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
    // copy pending data to the block, if any
    while (proto->state.markers[new_block_num] < proto->blocks[new_block_num].sz) {
        uint32_t read_sz = esp_apptrace_rb_read_size_get(&proto->rb_pend);
        if (read_sz == 0) {
            break; // no more data in the pending buffer
        }
        if (read_sz > proto->blocks[new_block_num].sz - proto->state.markers[new_block_num]) {
            read_sz = proto->blocks[new_block_num].sz - proto->state.markers[new_block_num];
        }
        uint8_t *ptr = esp_apptrace_rb_consume(&proto->rb_pend, read_sz);
        if (!ptr) {
            assert(false && "Failed to consume pending bytes!");
            break;
        }
        ESP_APPTRACE_LOGD("Pump %d pend bytes [%x %x %x %x : %x %x %x %x : %x %x %x %x : %x %x...%x %x]",
                          read_sz, *(ptr + 0), *(ptr + 1), *(ptr + 2), *(ptr + 3), *(ptr + 4),
                          *(ptr + 5), *(ptr + 6), *(ptr + 7), *(ptr + 8), *(ptr + 9), *(ptr + 10), *(ptr + 11), *(ptr + 12), *(ptr + 13), *(ptr + read_sz - 2), *(ptr + read_sz - 1));
        memcpy(proto->blocks[new_block_num].start + proto->state.markers[new_block_num], ptr, read_sz);
        proto->state.markers[new_block_num] += read_sz;
    }
#endif
    proto->hw->swap_end(proto->state.in_block, proto->state.markers[prev_block_num]);

    return res;
}

static esp_err_t esp_apptrace_membufs_swap_waitus(esp_apptrace_membufs_proto_data_t *proto, esp_apptrace_tmo_t *tmo)
{
    int res;

    while ((res = esp_apptrace_membufs_swap(proto)) != ESP_OK) {
        res = esp_apptrace_tmo_check(tmo);
        if (res != ESP_OK) {
            break;
        }
    }
    return res;
}

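/*
 * Waits until data received from the host is available in the down buffer or
 * the timeout expires; forces a block swap when the host reports pending
 * data. On success returns a pointer to the data and trims *size to the
 * number of bytes actually available.
 */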
uint8_t *esp_apptrace_membufs_down_buffer_get(esp_apptrace_membufs_proto_data_t *proto, uint32_t *size, esp_apptrace_tmo_t *tmo)
{
    uint8_t *ptr = NULL;

    while (1) {
        uint32_t sz = esp_apptrace_rb_read_size_get(&proto->rb_down);
        if (sz != 0) {
            *size = MIN(*size, sz);
            ptr = esp_apptrace_rb_consume(&proto->rb_down, *size);
            if (!ptr) {
                assert(false && "Failed to consume bytes from down buffer!");
            }
            break;
        }
        // may need to flush
        if (proto->hw->host_data_pending()) {
            ESP_APPTRACE_LOGD("force flush");
            int res = esp_apptrace_membufs_swap_waitus(proto, tmo);
            if (res != ESP_OK) {
                ESP_APPTRACE_LOGE("Failed to switch to another block to recv data from host!");
                /* do not return an error, because data can already be in the down buffer */
            }
        } else {
            // check the timeout only if there is no data from the host
            int res = esp_apptrace_tmo_check(tmo);
            if (res != ESP_OK) {
                return NULL;
            }
        }
    }
    return ptr;
}

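/*
 * A read-side usage sketch (hypothetical caller; `tmo` is assumed to have
 * been set up with the apptrace timeout helpers used elsewhere in this
 * component):
 *
 *   uint32_t sz = sizeof(dst);
 *   uint8_t *p = esp_apptrace_membufs_down_buffer_get(proto, &sz, &tmo);
 *   if (p) {
 *       memcpy(dst, p, sz); // sz may be smaller than requested
 *       esp_apptrace_membufs_down_buffer_put(proto, p, &tmo);
 *   }
 */
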
esp_err_t esp_apptrace_membufs_down_buffer_put(esp_apptrace_membufs_proto_data_t *proto, uint8_t *ptr, esp_apptrace_tmo_t *tmo)
{
    /* nothing to do */
    return ESP_OK;
}

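/*
 * "nolock" because the caller provides the synchronization: this is invoked
 * from esp_apptrace_membufs_swap() while moving host data into the down buffer.
 */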
static uint32_t esp_apptrace_membufs_down_buffer_write_nolock(esp_apptrace_membufs_proto_data_t *proto, uint8_t *data, uint32_t size)
{
    uint32_t total_sz = 0;

    while (total_sz < size) {
        ESP_APPTRACE_LOGD("esp_apptrace_membufs_down_buffer_write_nolock WRS %d-%d-%d %d", proto->rb_down.wr, proto->rb_down.rd,
                          proto->rb_down.cur_size, size);
        uint32_t wr_sz = esp_apptrace_rb_write_size_get(&proto->rb_down);
        if (wr_sz == 0) {
            break;
        }
        if (wr_sz > size - total_sz) {
            wr_sz = size - total_sz;
        }
        ESP_APPTRACE_LOGD("esp_apptrace_membufs_down_buffer_write_nolock wr %d", wr_sz);
        uint8_t *ptr = esp_apptrace_rb_produce(&proto->rb_down, wr_sz);
        if (!ptr) {
            assert(false && "Failed to produce bytes to down buffer!");
        }
        ESP_APPTRACE_LOGD("esp_apptrace_membufs_down_buffer_write_nolock wr %d to 0x%x from 0x%x", wr_sz, ptr, data + total_sz);
        memcpy(ptr, data + total_sz, wr_sz);
        total_sz += wr_sz;
        ESP_APPTRACE_LOGD("esp_apptrace_membufs_down_buffer_write_nolock wr %d/%d", wr_sz, total_sz);
    }
    return total_sz;
}

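/*
 * Waits for a successful block switch and then reserves `size` bytes either
 * in the fresh block (*pended = 0) or, when pending data remains or the block
 * cannot fit the request, in the pending ring buffer (*pended = 1).
 * Returns NULL on timeout or when no space can be reserved.
 */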
static inline uint8_t *esp_apptrace_membufs_wait4buf(esp_apptrace_membufs_proto_data_t *proto, uint16_t size, esp_apptrace_tmo_t *tmo, int *pended)
{
    uint8_t *ptr = NULL;

    int res = esp_apptrace_membufs_swap_waitus(proto, tmo);
    if (res != ESP_OK) {
        return NULL;
    }
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
    // check if we still have pending data
    if (esp_apptrace_rb_read_size_get(&proto->rb_pend) > 0) {
        // if after the block switch we still have pending data (not all of it has been pumped into the block),
        // allocate a new pending buffer
        *pended = 1;
        ptr = esp_apptrace_rb_produce(&proto->rb_pend, size);
        if (!ptr) {
            ESP_APPTRACE_LOGE("Failed to alloc pend buf 1: w-r-s %d-%d-%d!", proto->rb_pend.wr, proto->rb_pend.rd, proto->rb_pend.cur_size);
        }
    } else
#endif
    {
        // update block pointers
        if (ESP_APPTRACE_INBLOCK_MARKER(proto) + size > ESP_APPTRACE_INBLOCK(proto)->sz) {
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
            *pended = 1;
            ptr = esp_apptrace_rb_produce(&proto->rb_pend, size);
            if (ptr == NULL) {
                ESP_APPTRACE_LOGE("Failed to alloc pend buf 2: w-r-s %d-%d-%d!", proto->rb_pend.wr, proto->rb_pend.rd, proto->rb_pend.cur_size);
            }
#endif
        } else {
            *pended = 0;
            ptr = ESP_APPTRACE_INBLOCK(proto)->start + ESP_APPTRACE_INBLOCK_MARKER(proto);
        }
    }
    return ptr;
}

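/*
 * Packet framing: pkt_start() writes the trace data header in front of the
 * user payload and zeroes wr_sz; pkt_end() sets wr_sz equal to block_sz once
 * the user has finished writing. A host that sees wr_sz < block_sz knows the
 * write was interrupted (see the esp_tracedata_hdr_t comment above).
 */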
static inline uint8_t *esp_apptrace_membufs_pkt_start(uint8_t *ptr, uint16_t size)
{
    // it is safe to use esp_cpu_get_core_id() in the macro call because the arg is used only once inside it
    ((esp_tracedata_hdr_t *)ptr)->block_sz = ESP_APPTRACE_USR_BLOCK_CORE(esp_cpu_get_core_id()) | size;
    ((esp_tracedata_hdr_t *)ptr)->wr_sz = 0;
    return ptr + sizeof(esp_tracedata_hdr_t);
}

static inline void esp_apptrace_membufs_pkt_end(uint8_t *ptr)
{
    esp_tracedata_hdr_t *hdr = (esp_tracedata_hdr_t *)(ptr - sizeof(esp_tracedata_hdr_t));

    // update the written size
    hdr->wr_sz = hdr->block_sz;
}

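/*
 * Reserves `size` bytes of user payload in the up stream: preferably in the
 * current block, otherwise in the pending ring buffer, waiting for a block
 * switch when neither has room. Returns a pointer just past the packet header.
 */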
uint8_t *esp_apptrace_membufs_up_buffer_get(esp_apptrace_membufs_proto_data_t *proto, uint32_t size, esp_apptrace_tmo_t *tmo)
{
    uint8_t *buf_ptr = NULL;

    if (size > ESP_APPTRACE_USR_DATA_LEN_MAX(proto)) {
        ESP_APPTRACE_LOGE("Too large user data size %d!", size);
        return NULL;
    }
    // check for data in the pending buffer
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
    if (esp_apptrace_rb_read_size_get(&proto->rb_pend) > 0) {
        // if we have buffered data, try to switch the block
        esp_apptrace_membufs_swap(proto);
        // if the switch was successful, part or all of the pending data has been copied into the block
    }
    if (esp_apptrace_rb_read_size_get(&proto->rb_pend) > 0) {
        // if we still have buffered data, allocate a new pending buffer
        ESP_APPTRACE_LOGD("Get %d bytes from PEND buffer", size);
        buf_ptr = esp_apptrace_rb_produce(&proto->rb_pend, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
        if (buf_ptr == NULL) {
            int pended_buf;
            buf_ptr = esp_apptrace_membufs_wait4buf(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size), tmo, &pended_buf);
            if (buf_ptr && !pended_buf) {
                ESP_APPTRACE_LOGD("Got %d bytes from block", size);
                // update the current block marker
                ESP_APPTRACE_INBLOCK_MARKER_UPD(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
            }
        }
    } else {
#else
    if (1) {
#endif
        if (ESP_APPTRACE_INBLOCK_MARKER(proto) + ESP_APPTRACE_USR_BLOCK_RAW_SZ(size) > ESP_APPTRACE_INBLOCK(proto)->sz) {
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
            ESP_APPTRACE_LOGD("Block full. Get %d bytes from PEND buffer", size);
            buf_ptr = esp_apptrace_rb_produce(&proto->rb_pend, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
#endif
            if (buf_ptr == NULL) {
                int pended_buf;
                ESP_APPTRACE_LOGD("Block full. Wait for block switch to get %d bytes", size);
                buf_ptr = esp_apptrace_membufs_wait4buf(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size), tmo, &pended_buf);
                if (buf_ptr && !pended_buf) {
                    ESP_APPTRACE_LOGD("Got %d bytes from block", size);
                    // update the current block marker
                    ESP_APPTRACE_INBLOCK_MARKER_UPD(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
                }
            }
        } else {
            ESP_APPTRACE_LOGD("Get %d bytes from block", size);
            // the request fits into the current block
            buf_ptr = ESP_APPTRACE_INBLOCK(proto)->start + ESP_APPTRACE_INBLOCK_MARKER(proto);
            // update the current block marker
            ESP_APPTRACE_INBLOCK_MARKER_UPD(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
        }
    }
    if (buf_ptr) {
        buf_ptr = esp_apptrace_membufs_pkt_start(buf_ptr, size);
    }
    return buf_ptr;
}

esp_err_t esp_apptrace_membufs_up_buffer_put(esp_apptrace_membufs_proto_data_t *proto, uint8_t *ptr, esp_apptrace_tmo_t *tmo)
{
    esp_apptrace_membufs_pkt_end(ptr);
    // TODO: mark the block as busy in order not to re-use it for other tracing calls until it is completely written
    // TODO: avoid the potential situation when all memory is consumed by low-prio tasks which cannot complete writing
    //       because of higher-prio tasks, while the latter cannot allocate buffers at all;
    //       this abnormal situation can be detected on the host, which will receive only incomplete buffers;
    //       workaround: use our own memcpy which will kick off dead tracing calls
    return ESP_OK;
}

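/*
 * A write-side usage sketch (hypothetical caller; the generic apptrace write
 * path pairs these calls in the same way):
 *
 *   uint8_t *p = esp_apptrace_membufs_up_buffer_get(proto, len, &tmo);
 *   if (p) {
 *       memcpy(p, user_data, len);                          // fill the reserved payload
 *       esp_apptrace_membufs_up_buffer_put(proto, p, &tmo); // mark the packet complete
 *   }
 */
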
esp_err_t esp_apptrace_membufs_flush_nolock(esp_apptrace_membufs_proto_data_t *proto, uint32_t min_sz, esp_apptrace_tmo_t *tmo)
{
    int res = ESP_OK;

    if (ESP_APPTRACE_INBLOCK_MARKER(proto) < min_sz) {
        ESP_APPTRACE_LOGI("Ignore flush request for min %d bytes. Bytes in block: %d.", min_sz, ESP_APPTRACE_INBLOCK_MARKER(proto));
        return ESP_OK;
    }
    // switch blocks while the amount of data (including that in the pending buffer) is more than the min size
    while (ESP_APPTRACE_INBLOCK_MARKER(proto) > min_sz) {
        ESP_APPTRACE_LOGD("Try to flush %d bytes. Wait until block switch for %lld us", ESP_APPTRACE_INBLOCK_MARKER(proto), tmo->tmo);
        res = esp_apptrace_membufs_swap_waitus(proto, tmo);
        if (res != ESP_OK) {
            if (tmo->tmo != ESP_APPTRACE_TMO_INFINITE) {
                ESP_APPTRACE_LOGW("Failed to switch to another block in %lld us!", tmo->tmo);
            } else {
                ESP_APPTRACE_LOGE("Failed to switch to another block in %lld us!", tmo->tmo);
            }
            return res;
        }
    }
    return res;
}