log.c 9.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318
  1. // Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. // http://www.apache.org/licenses/LICENSE-2.0
  7. //
  8. // Unless required by applicable law or agreed to in writing, software
  9. // distributed under the License is distributed on an "AS IS" BASIS,
  10. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11. // See the License for the specific language governing permissions and
  12. // limitations under the License.
  13. /*
  14. * Log library — implementation notes.
  15. *
  16. * Log library stores all tags provided to esp_log_level_set as a linked
  17. * list. See uncached_tag_entry_t structure.
  18. *
  19. * To avoid looking up log level for given tag each time message is
  20. * printed, this library caches pointers to tags. Because the suggested
  21. * way of creating tags uses one 'TAG' constant per file, this caching
  22. * should be effective. Cache is a binary min-heap of cached_tag_entry_t
  23. * items, ordering is done on 'generation' member. In this context,
  24. * generation is an integer which is incremented each time an operation
  25. * with cache is performed. When cache is full, new item is inserted in
  26. * place of an oldest item (that is, with smallest 'generation' value).
  27. * After that, bubble-down operation is performed to fix ordering in the
  28. * min-heap.
  29. *
  30. * The potential problem with wrap-around of cache generation counter is
  31. * ignored for now. This will happen if someone happens to output more
  32. * than 4 billion log entries, at which point wrap-around will not be
  33. * the biggest problem.
  34. *
  35. */
  36. #ifndef BOOTLOADER_BUILD
  37. #include <freertos/FreeRTOS.h>
  38. #include <freertos/FreeRTOSConfig.h>
  39. #include <freertos/task.h>
  40. #include <freertos/semphr.h>
  41. #endif
  42. #include "esp_attr.h"
  43. #include "xtensa/hal.h"
  44. #include "soc/soc.h"
  45. #include <stdbool.h>
  46. #include <stdarg.h>
  47. #include <string.h>
  48. #include <stdlib.h>
  49. #include <stdio.h>
  50. #include <assert.h>
  51. #include "esp_log.h"
  52. #ifndef BOOTLOADER_BUILD
  53. // Number of tags to be cached. Must be 2**n - 1, n >= 2.
  54. #define TAG_CACHE_SIZE 31
  55. // Maximum time to wait for the mutex in a logging statement.
  56. #define MAX_MUTEX_WAIT_MS 10
  57. #define MAX_MUTEX_WAIT_TICKS ((MAX_MUTEX_WAIT_MS + portTICK_PERIOD_MS - 1) / portTICK_PERIOD_MS)
  58. // Uncomment this to enable consistency checks and cache statistics in this file.
  59. // #define LOG_BUILTIN_CHECKS
// One slot of the tag cache. Lookup compares the tag POINTER (not the
// string contents), which is fast because each file typically uses a single
// 'TAG' constant. Slots are ordered as a binary min-heap on 'generation'.
typedef struct {
    const char* tag;            // cache key: pointer to the caller's tag constant
    uint32_t level : 3;         // esp_log_level_t packed into 3 bits
    uint32_t generation : 29;   // age stamp; smallest value = oldest entry (eviction victim)
} cached_tag_entry_t;
  65. typedef struct uncached_tag_entry_{
  66. struct uncached_tag_entry_* next;
  67. uint8_t level; // esp_log_level_t as uint8_t
  68. char tag[0]; // beginning of a zero-terminated string
  69. } uncached_tag_entry_t;
// Level applied to any tag without an explicit override; set via the "*" wildcard.
static esp_log_level_t s_log_default_level = ESP_LOG_VERBOSE;
// Singly linked list of per-tag overrides installed by esp_log_level_set().
static uncached_tag_entry_t* s_log_tags_head = NULL;
static uncached_tag_entry_t* s_log_tags_tail = NULL;  // tail pointer for O(1) append
// Min-heap (keyed on 'generation') of recently seen tags for fast pointer lookup.
static cached_tag_entry_t s_log_cache[TAG_CACHE_SIZE];
static uint32_t s_log_cache_max_generation = 0;  // next generation stamp to hand out
static uint32_t s_log_cache_entry_count = 0;     // number of valid entries in s_log_cache
// Output backend, replaceable via esp_log_set_vprintf().
static vprintf_like_t s_log_print_func = &vprintf;
// Guards the tag list and the cache; created lazily on first use.
static SemaphoreHandle_t s_log_mutex = NULL;
#ifdef LOG_BUILTIN_CHECKS
static uint32_t s_log_cache_misses = 0;  // statistics: lookups that missed the cache
#endif
// Forward declarations for the cache / list helpers defined below.
static inline bool get_cached_log_level(const char* tag, esp_log_level_t* level);
static inline bool get_uncached_log_level(const char* tag, esp_log_level_t* level);
static inline void add_to_cache(const char* tag, esp_log_level_t level);
static void heap_bubble_down(int index);
static inline void heap_swap(int i, int j);
static inline bool should_output(esp_log_level_t level_for_message, esp_log_level_t level_for_tag);
static inline void clear_log_level_list();
// Install a custom vprintf-like function used to emit log output
// (default is the libc vprintf). The previous function is simply replaced.
void esp_log_set_vprintf(vprintf_like_t func)
{
    s_log_print_func = func;
}
  92. void esp_log_level_set(const char* tag, esp_log_level_t level)
  93. {
  94. if (!s_log_mutex) {
  95. s_log_mutex = xSemaphoreCreateMutex();
  96. }
  97. xSemaphoreTake(s_log_mutex, portMAX_DELAY);
  98. // for wildcard tag, remove all linked list items and clear the cache
  99. if (strcmp(tag, "*") == 0) {
  100. s_log_default_level = level;
  101. clear_log_level_list();
  102. xSemaphoreGive(s_log_mutex);
  103. return;
  104. }
  105. // allocate new linked list entry and append it to the endo of the list
  106. size_t entry_size = offsetof(uncached_tag_entry_t, tag) + strlen(tag) + 1;
  107. uncached_tag_entry_t* new_entry = (uncached_tag_entry_t*) malloc(entry_size);
  108. if (!new_entry) {
  109. xSemaphoreGive(s_log_mutex);
  110. return;
  111. }
  112. new_entry->next = NULL;
  113. new_entry->level = (uint8_t) level;
  114. strcpy(new_entry->tag, tag);
  115. if (s_log_tags_tail) {
  116. s_log_tags_tail->next = new_entry;
  117. }
  118. s_log_tags_tail = new_entry;
  119. if (!s_log_tags_head) {
  120. s_log_tags_head = new_entry;
  121. }
  122. xSemaphoreGive(s_log_mutex);
  123. }
  124. void clear_log_level_list()
  125. {
  126. for (uncached_tag_entry_t* it = s_log_tags_head; it != NULL; ) {
  127. uncached_tag_entry_t* next = it->next;
  128. free(it);
  129. it = next;
  130. }
  131. s_log_tags_tail = NULL;
  132. s_log_tags_head = NULL;
  133. s_log_cache_entry_count = 0;
  134. s_log_cache_max_generation = 0;
  135. #ifdef LOG_BUILTIN_CHECKS
  136. s_log_cache_misses = 0;
  137. #endif
  138. }
  139. void IRAM_ATTR esp_log_write(esp_log_level_t level,
  140. const char* tag,
  141. const char* format, ...)
  142. {
  143. if (!s_log_mutex) {
  144. s_log_mutex = xSemaphoreCreateMutex();
  145. }
  146. if (xSemaphoreTake(s_log_mutex, MAX_MUTEX_WAIT_TICKS) == pdFALSE) {
  147. return;
  148. }
  149. esp_log_level_t level_for_tag;
  150. // Look for the tag in cache first, then in the linked list of all tags
  151. if (!get_cached_log_level(tag, &level_for_tag)) {
  152. if (!get_uncached_log_level(tag, &level_for_tag)) {
  153. level_for_tag = s_log_default_level;
  154. }
  155. add_to_cache(tag, level_for_tag);
  156. #ifdef LOG_BUILTIN_CHECKS
  157. ++s_log_cache_misses;
  158. #endif
  159. }
  160. xSemaphoreGive(s_log_mutex);
  161. if (!should_output(level, level_for_tag)) {
  162. return;
  163. }
  164. va_list list;
  165. va_start(list, format);
  166. (*s_log_print_func)(format, list);
  167. va_end(list);
  168. }
// Look up 'tag' in the cache by POINTER identity (not string contents).
// On a hit, stores the cached level into *level, refreshes the entry's
// generation once the cache is full, and returns true. Returns false on miss.
// Caller must hold s_log_mutex.
static inline bool get_cached_log_level(const char* tag, esp_log_level_t* level)
{
    // Look for `tag` in cache
    int i;
    for (i = 0; i < s_log_cache_entry_count; ++i) {
#ifdef LOG_BUILTIN_CHECKS
        // Heap invariant: every parent is strictly older than its children.
        assert(i == 0 || s_log_cache[(i - 1) / 2].generation < s_log_cache[i].generation);
#endif
        if (s_log_cache[i].tag == tag) {
            break;
        }
    }
    if (i == s_log_cache_entry_count) { // Not found in cache
        return false;
    }
    // Return level from cache
    *level = (esp_log_level_t) s_log_cache[i].level;
    // If cache has been filled, start taking ordering into account
    // (other options are: dynamically resize cache, add "dummy" entries
    // to the cache; this option was chosen because code is much simpler,
    // and the unfair behavior of cache will show it self at most once, when
    // it has just been filled)
    if (s_log_cache_entry_count == TAG_CACHE_SIZE) {
        // Update item generation
        s_log_cache[i].generation = s_log_cache_max_generation++;
        // Restore heap ordering
        heap_bubble_down(i);
    }
    return true;
}
  199. static inline void add_to_cache(const char* tag, esp_log_level_t level)
  200. {
  201. uint32_t generation = s_log_cache_max_generation++;
  202. // First consider the case when cache is not filled yet.
  203. // In this case, just add new entry at the end.
  204. // This happens to satisfy binary min-heap ordering.
  205. if (s_log_cache_entry_count < TAG_CACHE_SIZE) {
  206. s_log_cache[s_log_cache_entry_count] = (cached_tag_entry_t) {
  207. .generation = generation,
  208. .level = level,
  209. .tag = tag
  210. };
  211. ++s_log_cache_entry_count;
  212. return;
  213. }
  214. // Cache is full, so we replace the oldest entry (which is at index 0
  215. // because this is a min-heap) with the new one, and do bubble-down
  216. // operation to restore min-heap ordering.
  217. s_log_cache[0] = (cached_tag_entry_t) {
  218. .tag = tag,
  219. .level = level,
  220. .generation = generation
  221. };
  222. heap_bubble_down(0);
  223. }
  224. static inline bool get_uncached_log_level(const char* tag, esp_log_level_t* level)
  225. {
  226. // Walk the linked list of all tags and see if given tag is present in the list.
  227. // This is slow because tags are compared as strings.
  228. for (uncached_tag_entry_t* it = s_log_tags_head; it != NULL; it = it->next) {
  229. if (strcmp(tag, it->tag) == 0) {
  230. *level = it->level;
  231. return true;
  232. }
  233. }
  234. return false;
  235. }
  236. static inline bool should_output(esp_log_level_t level_for_message, esp_log_level_t level_for_tag)
  237. {
  238. return level_for_message <= level_for_tag;
  239. }
  240. static void heap_bubble_down(int index)
  241. {
  242. while (index < TAG_CACHE_SIZE / 2) {
  243. int left_index = index * 2 + 1;
  244. int right_index = left_index + 1;
  245. int next = (s_log_cache[left_index].generation < s_log_cache[right_index].generation) ? left_index : right_index;
  246. heap_swap(index, next);
  247. index = next;
  248. }
  249. }
  250. static inline void heap_swap(int i, int j)
  251. {
  252. cached_tag_entry_t tmp = s_log_cache[i];
  253. s_log_cache[i] = s_log_cache[j];
  254. s_log_cache[j] = tmp;
  255. }
  256. #endif //BOOTLOADER_BUILD
  257. #ifndef BOOTLOADER_BUILD
  258. #define ATTR IRAM_ATTR
  259. #else
  260. #define ATTR
  261. #endif // BOOTLOADER_BUILD
  262. uint32_t ATTR esp_log_early_timestamp()
  263. {
  264. return xthal_get_ccount() / (CPU_CLK_FREQ_ROM / 1000);
  265. }
  266. #ifndef BOOTLOADER_BUILD
  267. uint32_t IRAM_ATTR esp_log_timestamp()
  268. {
  269. if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
  270. return esp_log_early_timestamp();
  271. }
  272. static uint32_t base = 0;
  273. if (base == 0) {
  274. base = esp_log_early_timestamp();
  275. }
  276. return base + xTaskGetTickCount() * (1000 / configTICK_RATE_HZ);
  277. }
  278. #else
  279. uint32_t esp_log_timestamp() __attribute__((alias("esp_log_early_timestamp")));
  280. #endif //BOOTLOADER_BUILD