/* app_trace_util.c */
/*
 * SPDX-FileCopyrightText: 2017-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
//
  7. #include "freertos/FreeRTOS.h"
  8. #include "freertos/task.h"
  9. #include "esp_app_trace_util.h"
  10. #include "sdkconfig.h"
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Locks ///////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
  14. #if ESP_APPTRACE_PRINT_LOCK
  15. static esp_apptrace_lock_t s_log_lock = {.irq_stat = 0, .portmux = portMUX_INITIALIZER_UNLOCKED};
  16. #endif
  17. int esp_apptrace_log_lock(void)
  18. {
  19. #if ESP_APPTRACE_PRINT_LOCK
  20. esp_apptrace_tmo_t tmo;
  21. esp_apptrace_tmo_init(&tmo, ESP_APPTRACE_TMO_INFINITE);
  22. int ret = esp_apptrace_lock_take(&s_log_lock, &tmo);
  23. return ret;
  24. #else
  25. return 0;
  26. #endif
  27. }
  28. void esp_apptrace_log_unlock(void)
  29. {
  30. #if ESP_APPTRACE_PRINT_LOCK
  31. esp_apptrace_lock_give(&s_log_lock);
  32. #endif
  33. }
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// TIMEOUT /////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
  37. esp_err_t esp_apptrace_tmo_check(esp_apptrace_tmo_t *tmo)
  38. {
  39. if (tmo->tmo != (int64_t)-1) {
  40. tmo->elapsed = esp_timer_get_time() - tmo->start;
  41. if (tmo->elapsed >= tmo->tmo) {
  42. return ESP_ERR_TIMEOUT;
  43. }
  44. }
  45. return ESP_OK;
  46. }
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// LOCK ////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
  50. esp_err_t esp_apptrace_lock_take(esp_apptrace_lock_t *lock, esp_apptrace_tmo_t *tmo)
  51. {
  52. int res;
  53. while (1) {
  54. // do not overwrite lock->int_state before we actually acquired the mux
  55. unsigned int_state = portENTER_CRITICAL_NESTED();
  56. // FIXME: if mux is busy it is not good idea to loop during the whole tmo with disabled IRQs.
  57. // So we check mux state using zero tmo, restore IRQs and let others tasks/IRQs to run on this CPU
  58. // while we are doing our own tmo check.
  59. #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
  60. bool success = vPortCPUAcquireMutexTimeout(&lock->mux, 0, __FUNCTION__, __LINE__);
  61. #else
  62. bool success = vPortCPUAcquireMutexTimeout(&lock->mux, 0);
  63. #endif
  64. if (success) {
  65. lock->int_state = int_state;
  66. return ESP_OK;
  67. }
  68. portEXIT_CRITICAL_NESTED(int_state);
  69. // we can be preempted from this place till the next call (above) to portENTER_CRITICAL_NESTED()
  70. res = esp_apptrace_tmo_check(tmo);
  71. if (res != ESP_OK) {
  72. break;
  73. }
  74. }
  75. return res;
  76. }
  77. esp_err_t esp_apptrace_lock_give(esp_apptrace_lock_t *lock)
  78. {
  79. // save lock's irq state value for this CPU
  80. unsigned int_state = lock->int_state;
  81. // after call to the following func we can not be sure that lock->int_state
  82. // is not overwritten by other CPU who has acquired the mux just after we released it. See esp_apptrace_lock_take().
  83. #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
  84. vPortCPUReleaseMutex(&lock->mux, __FUNCTION__, __LINE__);
  85. #else
  86. vPortCPUReleaseMutex(&lock->mux);
  87. #endif
  88. portEXIT_CRITICAL_NESTED(int_state);
  89. return ESP_OK;
  90. }
///////////////////////////////////////////////////////////////////////////////
////////////////////////////// RING BUFFER ////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
  94. uint8_t *esp_apptrace_rb_produce(esp_apptrace_rb_t *rb, uint32_t size)
  95. {
  96. uint8_t *ptr = rb->data + rb->wr;
  97. // check for avalable space
  98. if (rb->rd <= rb->wr) {
  99. // |?R......W??|
  100. if (rb->wr + size >= rb->size) {
  101. if (rb->rd == 0) {
  102. return NULL; // cannot wrap wr
  103. }
  104. if (rb->wr + size == rb->size) {
  105. rb->wr = 0;
  106. } else {
  107. // check if we can wrap wr earlier to get space for requested size
  108. if (size > rb->rd - 1) {
  109. return NULL; // cannot wrap wr
  110. }
  111. // shrink buffer a bit, full size will be restored at rd wrapping
  112. rb->cur_size = rb->wr;
  113. rb->wr = 0;
  114. ptr = rb->data;
  115. if (rb->rd == rb->cur_size) {
  116. rb->rd = 0;
  117. if (rb->cur_size < rb->size) {
  118. rb->cur_size = rb->size;
  119. }
  120. }
  121. rb->wr += size;
  122. }
  123. } else {
  124. rb->wr += size;
  125. }
  126. } else {
  127. // |?W......R??|
  128. if (size > rb->rd - rb->wr - 1) {
  129. return NULL;
  130. }
  131. rb->wr += size;
  132. }
  133. return ptr;
  134. }
  135. uint8_t *esp_apptrace_rb_consume(esp_apptrace_rb_t *rb, uint32_t size)
  136. {
  137. uint8_t *ptr = rb->data + rb->rd;
  138. if (rb->rd <= rb->wr) {
  139. // |?R......W??|
  140. if (rb->rd + size > rb->wr) {
  141. return NULL;
  142. }
  143. rb->rd += size;
  144. } else {
  145. // |?W......R??|
  146. if (rb->rd + size > rb->cur_size) {
  147. return NULL;
  148. } else if (rb->rd + size == rb->cur_size) {
  149. // restore full size usage
  150. if (rb->cur_size < rb->size) {
  151. rb->cur_size = rb->size;
  152. }
  153. rb->rd = 0;
  154. } else {
  155. rb->rd += size;
  156. }
  157. }
  158. return ptr;
  159. }
  160. uint32_t esp_apptrace_rb_read_size_get(esp_apptrace_rb_t *rb)
  161. {
  162. uint32_t size = 0;
  163. if (rb->rd <= rb->wr) {
  164. // |?R......W??|
  165. size = rb->wr - rb->rd;
  166. } else {
  167. // |?W......R??|
  168. size = rb->cur_size - rb->rd;
  169. }
  170. return size;
  171. }
  172. uint32_t esp_apptrace_rb_write_size_get(esp_apptrace_rb_t *rb)
  173. {
  174. uint32_t size = 0;
  175. if (rb->rd <= rb->wr) {
  176. // |?R......W??|
  177. size = rb->size - rb->wr;
  178. if (size && rb->rd == 0) {
  179. size--;
  180. }
  181. } else {
  182. // |?W......R??|
  183. size = rb->rd - rb->wr - 1;
  184. }
  185. return size;
  186. }