/* heap_trace_standalone.c */
  1. /*
  2. * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <string.h>
  7. #include <sdkconfig.h>
  8. #define HEAP_TRACE_SRCFILE /* don't warn on inclusion here */
  9. #include "esp_heap_trace.h"
  10. #undef HEAP_TRACE_SRCFILE
  11. #include "esp_attr.h"
  12. #include "freertos/FreeRTOS.h"
  13. #include "freertos/task.h"
  14. #define STACK_DEPTH CONFIG_HEAP_TRACING_STACK_DEPTH
#if CONFIG_HEAP_TRACING_STANDALONE

/* Spinlock guarding all of the tracer's shared state below. */
static portMUX_TYPE trace_mux = portMUX_INITIALIZER_UNLOCKED;

/* True while tracing is enabled (set by start/resume, cleared by stop). */
static bool tracing;

/* Trace mode selected by heap_trace_start() (e.g. leaks-only vs. all). */
static heap_trace_mode_t mode;

/* Buffer used for records, starting at offset 0.
   Owned by the caller of heap_trace_init_standalone(); capacity is
   'total_records' entries. */
static heap_trace_record_t *buffer;
static size_t total_records;

/* Count of entries logged in the buffer.
   Maximum total_records. */
static size_t count;

/* Actual number of allocations logged (monotonic, never decremented) */
static size_t total_allocations;

/* Actual number of frees logged (monotonic, never decremented) */
static size_t total_frees;

/* Has the buffer overflowed and lost trace entries? */
static bool has_overflowed = false;
  33. esp_err_t heap_trace_init_standalone(heap_trace_record_t *record_buffer, size_t num_records)
  34. {
  35. if (tracing) {
  36. return ESP_ERR_INVALID_STATE;
  37. }
  38. buffer = record_buffer;
  39. total_records = num_records;
  40. memset(buffer, 0, num_records * sizeof(heap_trace_record_t));
  41. return ESP_OK;
  42. }
  43. esp_err_t heap_trace_start(heap_trace_mode_t mode_param)
  44. {
  45. if (buffer == NULL || total_records == 0) {
  46. return ESP_ERR_INVALID_STATE;
  47. }
  48. portENTER_CRITICAL(&trace_mux);
  49. tracing = false;
  50. mode = mode_param;
  51. count = 0;
  52. total_allocations = 0;
  53. total_frees = 0;
  54. has_overflowed = false;
  55. heap_trace_resume();
  56. portEXIT_CRITICAL(&trace_mux);
  57. return ESP_OK;
  58. }
  59. static esp_err_t set_tracing(bool enable)
  60. {
  61. if (tracing == enable) {
  62. return ESP_ERR_INVALID_STATE;
  63. }
  64. tracing = enable;
  65. return ESP_OK;
  66. }
/* Stop recording trace events.
   Returns ESP_ERR_INVALID_STATE if tracing was already stopped. */
esp_err_t heap_trace_stop(void)
{
    return set_tracing(false);
}
/* Resume recording trace events after heap_trace_stop().
   Returns ESP_ERR_INVALID_STATE if tracing was already running. */
esp_err_t heap_trace_resume(void)
{
    return set_tracing(true);
}
/* Number of records currently held in the buffer.
   NOTE(review): read without taking trace_mux — an unlocked snapshot that
   may be stale if tracing is still running. */
size_t heap_trace_get_count(void)
{
    return count;
}
  79. esp_err_t heap_trace_get(size_t index, heap_trace_record_t *record)
  80. {
  81. if (record == NULL) {
  82. return ESP_ERR_INVALID_STATE;
  83. }
  84. esp_err_t result = ESP_OK;
  85. portENTER_CRITICAL(&trace_mux);
  86. if (index >= count) {
  87. result = ESP_ERR_INVALID_ARG; /* out of range for 'count' */
  88. } else {
  89. memcpy(record, &buffer[index], sizeof(heap_trace_record_t));
  90. }
  91. portEXIT_CRITICAL(&trace_mux);
  92. return result;
  93. }
  94. void heap_trace_dump(void)
  95. {
  96. size_t delta_size = 0;
  97. size_t delta_allocs = 0;
  98. printf("%u allocations trace (%u entry buffer)\n",
  99. count, total_records);
  100. size_t start_count = count;
  101. for (int i = 0; i < count; i++) {
  102. heap_trace_record_t *rec = &buffer[i];
  103. if (rec->address != NULL) {
  104. printf("%d bytes (@ %p) allocated CPU %d ccount 0x%08x caller ",
  105. rec->size, rec->address, rec->ccount & 1, rec->ccount & ~3);
  106. for (int j = 0; j < STACK_DEPTH && rec->alloced_by[j] != 0; j++) {
  107. printf("%p%s", rec->alloced_by[j],
  108. (j < STACK_DEPTH - 1) ? ":" : "");
  109. }
  110. if (mode != HEAP_TRACE_ALL || STACK_DEPTH == 0 || rec->freed_by[0] == NULL) {
  111. delta_size += rec->size;
  112. delta_allocs++;
  113. printf("\n");
  114. } else {
  115. printf("\nfreed by ");
  116. for (int j = 0; j < STACK_DEPTH; j++) {
  117. printf("%p%s", rec->freed_by[j],
  118. (j < STACK_DEPTH - 1) ? ":" : "\n");
  119. }
  120. }
  121. }
  122. }
  123. if (mode == HEAP_TRACE_ALL) {
  124. printf("%u bytes alive in trace (%u/%u allocations)\n",
  125. delta_size, delta_allocs, heap_trace_get_count());
  126. } else {
  127. printf("%u bytes 'leaked' in trace (%u allocations)\n", delta_size, delta_allocs);
  128. }
  129. printf("total allocations %u total frees %u\n", total_allocations, total_frees);
  130. if (start_count != count) { // only a problem if trace isn't stopped before dumping
  131. printf("(NB: New entries were traced while dumping, so trace dump may have duplicate entries.)\n");
  132. }
  133. if (has_overflowed) {
  134. printf("(NB: Buffer has overflowed, so trace data is incomplete.)\n");
  135. }
  136. }
/* Add a new allocation to the heap trace records.
   Called from the allocator hot path, hence IRAM_ATTR. */
static IRAM_ATTR void record_allocation(const heap_trace_record_t *record)
{
    /* Cheap unlocked early-out: skip if tracing is off or the allocation
       failed (NULL address). */
    if (!tracing || record->address == NULL) {
        return;
    }
    portENTER_CRITICAL(&trace_mux);
    /* Re-check under the lock: tracing may have been stopped concurrently. */
    if (tracing) {
        if (count == total_records) {
            /* Buffer is full: drop the oldest record to make room. */
            has_overflowed = true;
            /* Move the whole buffer back one slot.
            This is a bit slow, compared to treating this buffer as a ringbuffer and rotating a head pointer.
            However, ringbuffer code gets tricky when we remove elements in mid-buffer (for leak trace mode) while
            trying to keep track of an item count that may overflow.
            */
            memmove(&buffer[0], &buffer[1], sizeof(heap_trace_record_t) * (total_records -1));
            count--;
        }
        // Copy new record into place
        memcpy(&buffer[count], record, sizeof(heap_trace_record_t));
        count++;
        total_allocations++;
    }
    portEXIT_CRITICAL(&trace_mux);
}
  162. // remove a record, used when freeing
  163. static void remove_record(int index);
  164. /* record a free event in the heap trace log
  165. For HEAP_TRACE_ALL, this means filling in the freed_by pointer.
  166. For HEAP_TRACE_LEAKS, this means removing the record from the log.
  167. */
  168. static IRAM_ATTR void record_free(void *p, void **callers)
  169. {
  170. if (!tracing || p == NULL) {
  171. return;
  172. }
  173. portENTER_CRITICAL(&trace_mux);
  174. if (tracing && count > 0) {
  175. total_frees++;
  176. /* search backwards for the allocation record matching this free */
  177. int i;
  178. for (i = count - 1; i >= 0; i--) {
  179. if (buffer[i].address == p) {
  180. break;
  181. }
  182. }
  183. if (i >= 0) {
  184. if (mode == HEAP_TRACE_ALL) {
  185. memcpy(buffer[i].freed_by, callers, sizeof(void *) * STACK_DEPTH);
  186. } else { // HEAP_TRACE_LEAKS
  187. // Leak trace mode, once an allocation is freed we remove it from the list
  188. remove_record(i);
  189. }
  190. }
  191. }
  192. portEXIT_CRITICAL(&trace_mux);
  193. }
  194. /* remove the entry at 'index' from the ringbuffer of saved records */
  195. static IRAM_ATTR void remove_record(int index)
  196. {
  197. if (index < count - 1) {
  198. // Remove the buffer entry from the list
  199. memmove(&buffer[index], &buffer[index+1],
  200. sizeof(heap_trace_record_t) * (total_records - index - 1));
  201. } else {
  202. // For last element, just zero it out to avoid ambiguity
  203. memset(&buffer[index], 0, sizeof(heap_trace_record_t));
  204. }
  205. count--;
  206. }
  207. #include "heap_trace.inc"
  208. #endif /*CONFIG_HEAP_TRACING_STANDALONE*/