cache_utils.c

// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <freertos/FreeRTOS.h>
#include <freertos/task.h>
#include <freertos/semphr.h>
#include <rom/spi_flash.h>
#include <rom/cache.h>
#include <soc/soc.h>
#include <soc/dport_reg.h>
#include "sdkconfig.h"
#include "esp_ipc.h"
#include "esp_attr.h"
#include "esp_intr_alloc.h"
#include "esp_spi_flash.h"
#include "esp_log.h"

static void IRAM_ATTR spi_flash_disable_cache(uint32_t cpuid, uint32_t* saved_state);
static void IRAM_ATTR spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_state);

static uint32_t s_flash_op_cache_state[2];

#ifndef CONFIG_FREERTOS_UNICORE
static SemaphoreHandle_t s_flash_op_mutex;
static volatile bool s_flash_op_can_start = false;
static volatile bool s_flash_op_complete = false;
#ifndef NDEBUG
static volatile int s_flash_op_cpu = -1;
#endif

void spi_flash_init_lock()
{
    s_flash_op_mutex = xSemaphoreCreateMutex();
}

void spi_flash_op_lock()
{
    xSemaphoreTake(s_flash_op_mutex, portMAX_DELAY);
}

void spi_flash_op_unlock()
{
    xSemaphoreGive(s_flash_op_mutex);
}

void IRAM_ATTR spi_flash_op_block_func(void* arg)
{
    // Disable scheduler on this CPU
    vTaskSuspendAll();
    // Disable interrupts that aren't located in IRAM
    esp_intr_noniram_disable();
    uint32_t cpuid = (uint32_t) arg;
    // Disable cache so that the flash operation can start
    spi_flash_disable_cache(cpuid, &s_flash_op_cache_state[cpuid]);
    // s_flash_op_complete flag is cleared on *this* CPU, otherwise the other
    // CPU may reset the flag back to false before the IPC task has a chance to
    // check it (if it is preempted by an ISR taking a non-trivial amount of time)
    s_flash_op_complete = false;
    s_flash_op_can_start = true;
    while (!s_flash_op_complete) {
        // busy loop here and wait for the other CPU to finish the flash operation
    }
    // Flash operation is complete, re-enable cache
    spi_flash_restore_cache(cpuid, s_flash_op_cache_state[cpuid]);
    // Restore interrupts that aren't located in IRAM
    esp_intr_noniram_enable();
    // Re-enable scheduler
    xTaskResumeAll();
}

void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu()
{
    spi_flash_op_lock();
    const uint32_t cpuid = xPortGetCoreID();
    const uint32_t other_cpuid = (cpuid == 0) ? 1 : 0;
#ifndef NDEBUG
    // For the sanity check later: record which CPU has started the flash operation
    assert(s_flash_op_cpu == -1);
    s_flash_op_cpu = cpuid;
#endif
    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
        // Scheduler hasn't been started yet, which means the spi_flash API is
        // being called from the 2nd stage bootloader or from user_start_cpu0,
        // i.e. from the PRO CPU. The APP CPU is either in reset or spinning
        // inside user_start_cpu1, which is in IRAM. So it is safe to disable
        // cache for other_cpuid here.
        assert(other_cpuid == 1);
        spi_flash_disable_cache(other_cpuid, &s_flash_op_cache_state[other_cpuid]);
    } else {
        // Signal to spi_flash_op_block_func on the other CPU that we need it to
        // disable cache there and block other tasks from executing.
        s_flash_op_can_start = false;
        esp_err_t ret = esp_ipc_call(other_cpuid, &spi_flash_op_block_func, (void*) other_cpuid);
        assert(ret == ESP_OK);
        while (!s_flash_op_can_start) {
            // Busy loop and wait for spi_flash_op_block_func to disable cache
            // on the other CPU
        }
        // Disable scheduler on the current CPU
        vTaskSuspendAll();
        // This is guaranteed to run on CPU <cpuid> because the other CPU is now
        // occupied by the highest priority task
        assert(xPortGetCoreID() == cpuid);
    }
    // Kill interrupts that aren't located in IRAM
    esp_intr_noniram_disable();
    // Disable cache on this CPU as well
    spi_flash_disable_cache(cpuid, &s_flash_op_cache_state[cpuid]);
}

void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu()
{
    const uint32_t cpuid = xPortGetCoreID();
    const uint32_t other_cpuid = (cpuid == 0) ? 1 : 0;
#ifndef NDEBUG
    // Sanity check: the flash operation ends on the same CPU where it started
    assert(cpuid == s_flash_op_cpu);
    s_flash_op_cpu = -1;
#endif
    // Re-enable cache on this CPU
    spi_flash_restore_cache(cpuid, s_flash_op_cache_state[cpuid]);
    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
        // Scheduler is not running yet, so we are running on the PRO CPU.
        // other_cpuid is the APP CPU, and it is either in reset or spinning in
        // user_start_cpu1, which is in IRAM. So we can simply re-enable cache.
        assert(other_cpuid == 1);
        spi_flash_restore_cache(other_cpuid, s_flash_op_cache_state[other_cpuid]);
    } else {
        // Signal to spi_flash_op_block_func that the flash operation is complete
        s_flash_op_complete = true;
    }
    // Re-enable non-IRAM interrupts
    esp_intr_noniram_enable();
    // Resume tasks on the current CPU, if the scheduler has started.
    // NOTE: enabling non-IRAM interrupts has to happen before this, because
    // once the scheduler has started, the current task can be preempted and
    // end up being moved to the other CPU. But esp_intr_noniram_enable has to
    // be called on the same CPU which called esp_intr_noniram_disable.
    if (xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) {
        xTaskResumeAll();
    }
    // Release API lock
    spi_flash_op_unlock();
}
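
// Example (not part of the original file, excluded from the build): a minimal
// sketch of how a caller is expected to bracket a raw flash access with the
// two functions above. The ROM routine SPIRead comes from rom/spi_flash.h;
// the wrapper name, its signature and the error handling are hypothetical.
// The function must be placed in IRAM and the destination buffer in DRAM,
// since cache is disabled while it runs.
#if 0
static esp_err_t IRAM_ATTR example_read_raw(uint32_t src_addr, uint32_t* dest, uint32_t size)
{
    // Stop the other CPU, non-IRAM interrupts and both caches; from here on,
    // only IRAM-resident code and DRAM-resident data may be touched.
    spi_flash_disable_interrupts_caches_and_other_cpu();
    SpiFlashOpResult rc = SPIRead(src_addr, dest, size);
    // Restore caches, interrupts and the other CPU, then release the API lock.
    spi_flash_enable_interrupts_caches_and_other_cpu();
    return (rc == SPI_FLASH_RESULT_OK) ? ESP_OK : ESP_FAIL;
}
#endif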

void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu_no_os()
{
    const uint32_t cpuid = xPortGetCoreID();
    const uint32_t other_cpuid = (cpuid == 0) ? 1 : 0;
    // Do not care about the other CPU, it was halted upon entering the panic handler
    spi_flash_disable_cache(other_cpuid, &s_flash_op_cache_state[other_cpuid]);
    // Kill interrupts that aren't located in IRAM
    esp_intr_noniram_disable();
    // Disable cache on this CPU as well
    spi_flash_disable_cache(cpuid, &s_flash_op_cache_state[cpuid]);
}

void IRAM_ATTR spi_flash_enable_interrupts_caches_no_os()
{
    const uint32_t cpuid = xPortGetCoreID();
    // Re-enable cache on this CPU
    spi_flash_restore_cache(cpuid, s_flash_op_cache_state[cpuid]);
    // Re-enable non-IRAM interrupts
    esp_intr_noniram_enable();
}
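
// Example (not part of the original file, excluded from the build): a
// hypothetical sketch of how the _no_os variants above might be used from the
// panic handler, where the other CPU has already been halted and FreeRTOS
// primitives must not be used. The function name is an assumption.
#if 0
static void IRAM_ATTR example_panic_flash_access(void)
{
    spi_flash_disable_interrupts_caches_and_other_cpu_no_os();
    // ... perform ROM SPI flash calls here, from IRAM-resident code only ...
    spi_flash_enable_interrupts_caches_no_os();
}
#endif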

#else // CONFIG_FREERTOS_UNICORE

void spi_flash_init_lock()
{
}

void spi_flash_op_lock()
{
    vTaskSuspendAll();
}

void spi_flash_op_unlock()
{
    xTaskResumeAll();
}

void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu()
{
    spi_flash_op_lock();
    esp_intr_noniram_disable();
    spi_flash_disable_cache(0, &s_flash_op_cache_state[0]);
}

void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu()
{
    spi_flash_restore_cache(0, s_flash_op_cache_state[0]);
    esp_intr_noniram_enable();
    spi_flash_op_unlock();
}

void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu_no_os()
{
    // Kill interrupts that aren't located in IRAM
    esp_intr_noniram_disable();
    // Disable cache on this CPU as well
    spi_flash_disable_cache(0, &s_flash_op_cache_state[0]);
}

void IRAM_ATTR spi_flash_enable_interrupts_caches_no_os()
{
    // Re-enable cache on this CPU
    spi_flash_restore_cache(0, s_flash_op_cache_state[0]);
    // Re-enable non-IRAM interrupts
    esp_intr_noniram_enable();
}
#endif // CONFIG_FREERTOS_UNICORE

/**
 * The following two functions are replacements for the Cache_Read_Disable and
 * Cache_Read_Enable functions in ROM. They are used to work around a bug where
 * Cache_Read_Disable requires a call to Cache_Flush before Cache_Read_Enable,
 * even if the cached data was not modified.
 */

static const uint32_t cache_mask = DPORT_APP_CACHE_MASK_OPSDRAM | DPORT_APP_CACHE_MASK_DROM0 |
                                   DPORT_APP_CACHE_MASK_DRAM1 | DPORT_APP_CACHE_MASK_IROM0 |
                                   DPORT_APP_CACHE_MASK_IRAM1 | DPORT_APP_CACHE_MASK_IRAM0;

static void IRAM_ATTR spi_flash_disable_cache(uint32_t cpuid, uint32_t* saved_state)
{
    uint32_t ret = 0;
    if (cpuid == 0) {
        // Save the cache region enable bits, wait until the cache is idle, then disable it
        ret |= GET_PERI_REG_BITS2(DPORT_PRO_CACHE_CTRL1_REG, cache_mask, 0);
        while (GET_PERI_REG_BITS2(DPORT_PRO_DCACHE_DBUG0_REG, DPORT_PRO_CACHE_STATE, DPORT_PRO_CACHE_STATE_S) != 1) {
            ;
        }
        SET_PERI_REG_BITS(DPORT_PRO_CACHE_CTRL_REG, 1, 0, DPORT_PRO_CACHE_ENABLE_S);
    } else {
        ret |= GET_PERI_REG_BITS2(DPORT_APP_CACHE_CTRL1_REG, cache_mask, 0);
        while (GET_PERI_REG_BITS2(DPORT_APP_DCACHE_DBUG0_REG, DPORT_APP_CACHE_STATE, DPORT_APP_CACHE_STATE_S) != 1) {
            ;
        }
        SET_PERI_REG_BITS(DPORT_APP_CACHE_CTRL_REG, 1, 0, DPORT_APP_CACHE_ENABLE_S);
    }
    *saved_state = ret;
}

static void IRAM_ATTR spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_state)
{
    if (cpuid == 0) {
        // Re-enable the cache, then restore the saved region enable bits
        SET_PERI_REG_BITS(DPORT_PRO_CACHE_CTRL_REG, 1, 1, DPORT_PRO_CACHE_ENABLE_S);
        SET_PERI_REG_BITS(DPORT_PRO_CACHE_CTRL1_REG, cache_mask, saved_state, 0);
    } else {
        SET_PERI_REG_BITS(DPORT_APP_CACHE_CTRL_REG, 1, 1, DPORT_APP_CACHE_ENABLE_S);
        SET_PERI_REG_BITS(DPORT_APP_CACHE_CTRL1_REG, cache_mask, saved_state, 0);
    }
}

IRAM_ATTR bool spi_flash_cache_enabled()
{
    return REG_GET_BIT(DPORT_PRO_CACHE_CTRL_REG, DPORT_PRO_CACHE_ENABLE)
        && REG_GET_BIT(DPORT_APP_CACHE_CTRL_REG, DPORT_APP_CACHE_ENABLE);
}
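
// Example (not part of the original file, excluded from the build): a
// hypothetical caller of spi_flash_cache_enabled(). String literals normally
// live in the flash-mapped DROM region, which is reachable only through the
// cache, so the access is skipped while cache is disabled. The function name
// is an assumption.
#if 0
static void example_log_if_safe(const char* msg_in_flash)
{
    if (spi_flash_cache_enabled()) {
        printf("%s\n", msg_in_flash);
    }
}
#endif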