heap_trace.inc

// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <string.h>
#include <sdkconfig.h>
#include "soc/soc_memory_layout.h"
#include "esp_attr.h"
#include "esp_cpu.h"
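
/* Heap tracing hooks: these wrappers intercept malloc/realloc/free style calls,
   record them, and then hand the request on to the real allocator.

   Note that this is an include fragment rather than a standalone translation unit:
   the including source file is expected to supply STACK_DEPTH, heap_trace_record_t,
   record_allocation() and record_free(), as well as the FreeRTOS declarations used
   by get_ccount() below. */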

/* Encode the CPU ID in the LSB of the ccount value */
inline static uint32_t get_ccount(void)
{
    uint32_t ccount = esp_cpu_get_cycle_count() & ~3;
#ifndef CONFIG_FREERTOS_UNICORE
    ccount |= xPortGetCoreID();
#endif
    return ccount;
}
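
/* Rounding the cycle count down to a multiple of 4 frees the two low bits, so on
   multi-core builds the core ID can be stored there: one 32-bit field then carries
   both an approximate timestamp and the CPU that made the call. */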

/* Architecture-specific return value of __builtin_return_address which
 * should be interpreted as an invalid address.
 */
#ifdef __XTENSA__
#define HEAP_ARCH_INVALID_PC 0x40000000
#else
#define HEAP_ARCH_INVALID_PC 0x00000000
#endif

// Caller is 2 stack frames deeper than we care about
#define STACK_OFFSET 2

#define TEST_STACK(N) do {                                             \
        if (STACK_DEPTH == N) {                                        \
            return;                                                    \
        }                                                              \
        callers[N] = __builtin_return_address(N+STACK_OFFSET);         \
        if (!esp_ptr_executable(callers[N])                            \
            || callers[N] == (void*) HEAP_ARCH_INVALID_PC) {           \
            callers[N] = 0;                                            \
            return;                                                    \
        }                                                              \
    } while(0)

/* Static function to read the call stack for a traced heap call.

   Calls to __builtin_return_address are "unrolled" via TEST_STACK macro as gcc requires the
   argument to be a compile-time constant.
*/
static IRAM_ATTR __attribute__((noinline)) void get_call_stack(void **callers)
{
    bzero(callers, sizeof(void *) * STACK_DEPTH);
    TEST_STACK(0);
    TEST_STACK(1);
    TEST_STACK(2);
    TEST_STACK(3);
    TEST_STACK(4);
    TEST_STACK(5);
    TEST_STACK(6);
    TEST_STACK(7);
    TEST_STACK(8);
    TEST_STACK(9);
}

_Static_assert(STACK_DEPTH >= 0 && STACK_DEPTH <= 10, "CONFIG_HEAP_TRACING_STACK_DEPTH must be in range 0-10");
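
/* STACK_DEPTH itself is not defined in this file; judging by the assertion message it
   is expected to come from the CONFIG_HEAP_TRACING_STACK_DEPTH Kconfig option, and it
   must not exceed the ten TEST_STACK() expansions above. */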

typedef enum {
    TRACE_MALLOC_CAPS,
    TRACE_MALLOC_DEFAULT
} trace_malloc_mode_t;

void *__real_heap_caps_malloc(size_t size, uint32_t caps);
void *__real_heap_caps_malloc_default(size_t size);
void *__real_heap_caps_realloc_default(void *ptr, size_t size);

/* trace any 'malloc' event */
static IRAM_ATTR __attribute__((noinline)) void *trace_malloc(size_t size, uint32_t caps, trace_malloc_mode_t mode)
{
    uint32_t ccount = get_ccount();
    void *p;

    if (mode == TRACE_MALLOC_CAPS) {
        p = __real_heap_caps_malloc(size, caps);
    } else { // TRACE_MALLOC_DEFAULT
        p = __real_heap_caps_malloc_default(size);
    }

    heap_trace_record_t rec = {
        .address = p,
        .ccount = ccount,
        .size = size,
    };
    get_call_stack(rec.alloced_by);
    record_allocation(&rec);
    return p;
}

void __real_heap_caps_free(void *p);

/* trace any 'free' event */
static IRAM_ATTR __attribute__((noinline)) void trace_free(void *p)
{
    void *callers[STACK_DEPTH];
    get_call_stack(callers);
    record_free(p, callers);
    __real_heap_caps_free(p);
}

void *__real_heap_caps_realloc(void *p, size_t size, uint32_t caps);

/* trace any 'realloc' event */
static IRAM_ATTR __attribute__((noinline)) void *trace_realloc(void *p, size_t size, uint32_t caps, trace_malloc_mode_t mode)
{
    void *callers[STACK_DEPTH];
    uint32_t ccount = get_ccount();
    void *r;

    /* trace realloc as free-then-alloc */
    get_call_stack(callers);
    record_free(p, callers);

    if (mode == TRACE_MALLOC_CAPS) {
        r = __real_heap_caps_realloc(p, size, caps);
    } else { // TRACE_MALLOC_DEFAULT
        r = __real_heap_caps_realloc_default(p, size);
    }

    /* realloc with zero size is a free */
    if (size != 0) {
        heap_trace_record_t rec = {
            .address = r,
            .ccount = ccount,
            .size = size,
        };
        memcpy(rec.alloced_by, callers, sizeof(void *) * STACK_DEPTH);
        record_allocation(&rec);
    }
    return r;
}

/* Note: this changes the behaviour of libc malloc/realloc/free a bit,
   as they no longer go via the libc functions in ROM. But more or less
   the same in the end. */
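
/* The __wrap_/__real_ pairs below rely on the GNU linker's --wrap feature. The build
   system (not this file) is assumed to pass flags along the lines of

       -Wl,--wrap=malloc -Wl,--wrap=free -Wl,--wrap=calloc -Wl,--wrap=realloc
       -Wl,--wrap=heap_caps_malloc -Wl,--wrap=heap_caps_free -Wl,--wrap=heap_caps_realloc
       -Wl,--wrap=heap_caps_malloc_default -Wl,--wrap=heap_caps_realloc_default

   so that callers of malloc() etc. are redirected to the __wrap_* functions here,
   while the __real_* names still resolve to the original allocator. */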

IRAM_ATTR void *__wrap_malloc(size_t size)
{
    return trace_malloc(size, 0, TRACE_MALLOC_DEFAULT);
}

IRAM_ATTR void __wrap_free(void *p)
{
    trace_free(p);
}

IRAM_ATTR void *__wrap_realloc(void *p, size_t size)
{
    return trace_realloc(p, size, 0, TRACE_MALLOC_DEFAULT);
}

IRAM_ATTR void *__wrap_calloc(size_t nmemb, size_t size)
{
    size_t size_bytes;
    /* reject overflow of nmemb * size rather than allocating an undersized buffer */
    if (__builtin_mul_overflow(nmemb, size, &size_bytes)) {
        return NULL;
    }
    void *result = trace_malloc(size_bytes, 0, TRACE_MALLOC_DEFAULT);
    if (result != NULL) {
        memset(result, 0, size_bytes);
    }
    return result;
}

IRAM_ATTR void *__wrap_heap_caps_malloc(size_t size, uint32_t caps)
{
    return trace_malloc(size, caps, TRACE_MALLOC_CAPS);
}

void __wrap_heap_caps_free(void *p) __attribute__((alias("__wrap_free")));

IRAM_ATTR void *__wrap_heap_caps_realloc(void *p, size_t size, uint32_t caps)
{
    return trace_realloc(p, size, caps, TRACE_MALLOC_CAPS);
}

IRAM_ATTR void *__wrap_heap_caps_malloc_default(size_t size)
{
    return trace_malloc(size, 0, TRACE_MALLOC_DEFAULT);
}

IRAM_ATTR void *__wrap_heap_caps_realloc_default(void *ptr, size_t size)
{
    return trace_realloc(ptr, size, 0, TRACE_MALLOC_DEFAULT);
}
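
/* Minimal usage sketch (an assumption about the surrounding API, based on the standalone
   heap tracer declared in esp_heap_trace.h; not part of this file): once these wrappers
   are linked in, an application would typically do something like

       static heap_trace_record_t trace_buffer[100];

       heap_trace_init_standalone(trace_buffer, 100);
       heap_trace_start(HEAP_TRACE_LEAKS);
       ...code under test, whose malloc/free calls land in the wrappers above...
       heap_trace_stop();
       heap_trace_dump();
*/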