heap_trace.inc

// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <string.h>
#include <sdkconfig.h>
#include "soc/soc_memory_layout.h"
#include "esp_attr.h"
#include "esp_cpu.h"
#include "esp_macros.h"

/* Encode the CPU ID in the LSB of the ccount value */
inline static uint32_t get_ccount(void)
{
    uint32_t ccount = esp_cpu_get_cycle_count() & ~3; // clear the two low bits; the core ID (0 or 1) is stored in the LSB
#ifndef CONFIG_FREERTOS_UNICORE
    ccount |= xPortGetCoreID();
#endif
    return ccount;
}
/* Architecture-specific return value of __builtin_return_address which
 * should be interpreted as an invalid address.
 */
#ifdef __XTENSA__
#define HEAP_ARCH_INVALID_PC  0x40000000
#else
#define HEAP_ARCH_INVALID_PC  0x00000000
#endif

// Caller is 2 stack frames deeper than we care about
#define STACK_OFFSET  2

#define TEST_STACK(N) do { \
        if (STACK_DEPTH == N) { \
            return; \
        } \
        callers[N] = __builtin_return_address(N+STACK_OFFSET); \
        if (!esp_ptr_executable(callers[N]) \
            || callers[N] == (void*) HEAP_ARCH_INVALID_PC) { \
            callers[N] = 0; \
            return; \
        } \
    } while(0)
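
/* For illustration, TEST_STACK(1) expands to roughly the following (minus the
   do/while wrapper), so each invocation records one more return address and
   bails out as soon as the configured depth is reached or the unwind hits a
   non-executable or invalid PC:

       if (STACK_DEPTH == 1) {
           return;
       }
       callers[1] = __builtin_return_address(1 + STACK_OFFSET);
       if (!esp_ptr_executable(callers[1])
           || callers[1] == (void*) HEAP_ARCH_INVALID_PC) {
           callers[1] = 0;
           return;
       }
*/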
/* Static function to read the call stack for a traced heap call.

   Calls to __builtin_return_address are "unrolled" via the TEST_STACK macro,
   as gcc requires the argument to be a compile-time constant.
*/
static HEAP_IRAM_ATTR __attribute__((noinline)) void get_call_stack(void **callers)
{
    bzero(callers, sizeof(void *) * STACK_DEPTH);
    TEST_STACK(0);
    TEST_STACK(1);
    TEST_STACK(2);
    TEST_STACK(3);
    TEST_STACK(4);
    TEST_STACK(5);
    TEST_STACK(6);
    TEST_STACK(7);
    TEST_STACK(8);
    TEST_STACK(9);
    TEST_STACK(10);
    TEST_STACK(11);
    TEST_STACK(12);
    TEST_STACK(13);
    TEST_STACK(14);
    TEST_STACK(15);
    TEST_STACK(16);
    TEST_STACK(17);
    TEST_STACK(18);
    TEST_STACK(19);
    TEST_STACK(20);
    TEST_STACK(21);
    TEST_STACK(22);
    TEST_STACK(23);
    TEST_STACK(24);
    TEST_STACK(25);
    TEST_STACK(26);
    TEST_STACK(27);
    TEST_STACK(28);
    TEST_STACK(29);
    TEST_STACK(30);
    TEST_STACK(31);
}
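
/* Note: STACK_DEPTH is not defined in this .inc file; it is expected to be
   provided by the source file that includes it, presumably derived from the
   CONFIG_HEAP_TRACING_STACK_DEPTH option checked below. */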
ESP_STATIC_ASSERT(STACK_DEPTH >= 0 && STACK_DEPTH <= 32, "CONFIG_HEAP_TRACING_STACK_DEPTH must be in range 0-32");

typedef enum {
    TRACE_MALLOC_CAPS,
    TRACE_MALLOC_DEFAULT
} trace_malloc_mode_t;
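
/* TRACE_MALLOC_CAPS traces a call made with explicit capability flags
   (heap_caps_malloc/heap_caps_realloc); TRACE_MALLOC_DEFAULT traces a call
   routed through the *_default variants (used for plain libc malloc/realloc),
   which choose the capabilities themselves. */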
void *__real_heap_caps_malloc(size_t size, uint32_t caps);
void *__real_heap_caps_malloc_default(size_t size);
void *__real_heap_caps_realloc_default(void *ptr, size_t size);

/* trace any 'malloc' event */
static HEAP_IRAM_ATTR __attribute__((noinline)) void *trace_malloc(size_t size, uint32_t caps, trace_malloc_mode_t mode)
{
    uint32_t ccount = get_ccount();
    void *p;

    if (mode == TRACE_MALLOC_CAPS) {
        p = __real_heap_caps_malloc(size, caps);
    } else { // TRACE_MALLOC_DEFAULT
        p = __real_heap_caps_malloc_default(size);
    }

    heap_trace_record_t rec = {
        .address = p,
        .ccount = ccount,
        .size = size,
    };
    get_call_stack(rec.alloced_by);
    record_allocation(&rec);
    return p;
}
void __real_heap_caps_free(void *p);

/* trace any 'free' event */
static HEAP_IRAM_ATTR __attribute__((noinline)) void trace_free(void *p)
{
    void *callers[STACK_DEPTH];
    get_call_stack(callers);
    record_free(p, callers);
    __real_heap_caps_free(p);
}
void *__real_heap_caps_realloc(void *p, size_t size, uint32_t caps);

/* trace any 'realloc' event */
static HEAP_IRAM_ATTR __attribute__((noinline)) void *trace_realloc(void *p, size_t size, uint32_t caps, trace_malloc_mode_t mode)
{
    void *callers[STACK_DEPTH];
    uint32_t ccount = get_ccount();
    void *r;

    /* trace realloc as free-then-alloc */
    get_call_stack(callers);
    record_free(p, callers);

    if (mode == TRACE_MALLOC_CAPS) {
        r = __real_heap_caps_realloc(p, size, caps);
    } else { // TRACE_MALLOC_DEFAULT
        r = __real_heap_caps_realloc_default(p, size);
    }

    /* realloc with zero size is a free */
    if (size != 0) {
        heap_trace_record_t rec = {
            .address = r,
            .ccount = ccount,
            .size = size,
        };
        memcpy(rec.alloced_by, callers, sizeof(void *) * STACK_DEPTH);
        record_allocation(&rec);
    }
    return r;
}
/* Note: this changes the behaviour of libc malloc/realloc/free a bit,
   as these calls no longer go via the libc functions in ROM, but the
   end result is more or less the same. */
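
/* A sketch of how the wrappers below take effect, using the GNU linker's --wrap
   feature (the flag names are standard GNU ld; the exact list of wrapped symbols
   comes from the heap component's build files, not from this file):

       gcc ... -Wl,--wrap=malloc -Wl,--wrap=free -Wl,--wrap=realloc -Wl,--wrap=calloc ...

   With --wrap=malloc, every undefined reference to malloc() resolves to
   __wrap_malloc(), and a reference to __real_malloc() resolves to the original
   malloc(). The same convention gives the __real_heap_caps_* declarations above
   access to the unwrapped heap implementation. */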
HEAP_IRAM_ATTR void *__wrap_malloc(size_t size)
{
    return trace_malloc(size, 0, TRACE_MALLOC_DEFAULT);
}

HEAP_IRAM_ATTR void __wrap_free(void *p)
{
    trace_free(p);
}

HEAP_IRAM_ATTR void *__wrap_realloc(void *p, size_t size)
{
    return trace_realloc(p, size, 0, TRACE_MALLOC_DEFAULT);
}
HEAP_IRAM_ATTR void *__wrap_calloc(size_t nmemb, size_t size)
{
    size_t size_bytes;
    // reject nmemb * size overflow instead of silently allocating a short buffer
    if (__builtin_mul_overflow(nmemb, size, &size_bytes)) {
        return NULL;
    }
    void *result = trace_malloc(size_bytes, 0, TRACE_MALLOC_DEFAULT);
    if (result != NULL) {
        memset(result, 0, size_bytes);
    }
    return result;
}
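
/* Design note: calloc is implemented here on top of trace_malloc() + memset()
   rather than by wrapping a separate __real_calloc(), which keeps the zeroed
   allocation recorded through the same trace_malloc() path as any other malloc. */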
HEAP_IRAM_ATTR void *__wrap_heap_caps_malloc(size_t size, uint32_t caps)
{
    return trace_malloc(size, caps, TRACE_MALLOC_CAPS);
}

void __wrap_heap_caps_free(void *p) __attribute__((alias("__wrap_free")));

HEAP_IRAM_ATTR void *__wrap_heap_caps_realloc(void *p, size_t size, uint32_t caps)
{
    return trace_realloc(p, size, caps, TRACE_MALLOC_CAPS);
}

HEAP_IRAM_ATTR void *__wrap_heap_caps_malloc_default(size_t size)
{
    return trace_malloc(size, 0, TRACE_MALLOC_DEFAULT);
}

HEAP_IRAM_ATTR void *__wrap_heap_caps_realloc_default(void *ptr, size_t size)
{
    return trace_realloc(ptr, size, 0, TRACE_MALLOC_DEFAULT);
}