/* sleep_cpu.c — CPU power-down retention support for light sleep */
  1. /*
  2. * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <stddef.h>
  7. #include <string.h>
  8. #include <inttypes.h>
  9. #include <sys/lock.h>
  10. #include <sys/param.h>
  11. #include "esp_attr.h"
  12. #include "esp_check.h"
  13. #include "esp_sleep.h"
  14. #include "esp_log.h"
  15. #include "esp_crc.h"
  16. #include "freertos/FreeRTOS.h"
  17. #include "freertos/task.h"
  18. #include "esp_heap_caps.h"
  19. #include "soc/soc_caps.h"
  20. #include "esp_private/sleep_cpu.h"
  21. #include "sdkconfig.h"
  22. #if !SOC_PMU_SUPPORTED
  23. #include "hal/rtc_hal.h"
  24. #endif
  25. #if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
  26. #include "esp_private/system_internal.h"
  27. #include "hal/clk_gate_ll.h"
  28. #include "hal/uart_hal.h"
  29. #endif
  30. #include "soc/rtc_periph.h"
  31. #ifdef CONFIG_IDF_TARGET_ESP32S3
  32. #include "esp32s3/rom/cache.h"
  33. #elif CONFIG_IDF_TARGET_ESP32C6
  34. #include "esp32c6/rom/rtc.h"
  35. #include "riscv/rvsleep-frames.h"
  36. #include "soc/intpri_reg.h"
  37. #include "soc/extmem_reg.h"
  38. #include "soc/plic_reg.h"
  39. #include "soc/clint_reg.h"
  40. #include "esp32c6/rom/cache.h"
  41. #endif
/* Log tag (marked unused: logging may be compiled out) */
static __attribute__((unused)) const char *TAG = "sleep";

/* One contiguous register region [start, end) in the CPU power domain that
 * must be backed up before power-down and restored on wakeup. */
typedef struct {
    uint32_t start;   /* first register address (inclusive) */
    uint32_t end;     /* one past the last register address (exclusive) */
} cpu_domain_dev_regs_region_t;

/* Backup frame for one CPU-domain peripheral: its region table plus the
 * buffer that holds the saved register values. */
typedef struct {
    cpu_domain_dev_regs_region_t *region;   /* array of register regions */
    int region_num;                         /* entries in `region` */
    uint32_t *regs_frame;                   /* backup storage for all regions */
} cpu_domain_dev_sleep_frame_t;

/**
 * Internal structure which holds all requested light sleep cpu retention parameters
 */
typedef struct {
#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    rtc_cntl_sleep_retent_t retent;   /* retention driven by RTC controller DMA */
#elif SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
    struct {
        RvCoreCriticalSleepFrame *critical_frame;          /* core regs saved by the assembly sleep path */
        RvCoreNonCriticalSleepFrame *non_critical_frame;   /* CSRs saved from C before sleeping */
        cpu_domain_dev_sleep_frame_t *intpri_frame;        /* interrupt priority registers */
        cpu_domain_dev_sleep_frame_t *cache_config_frame;  /* cache control registers */
        cpu_domain_dev_sleep_frame_t *plic_frame;          /* platform-level interrupt controller */
        cpu_domain_dev_sleep_frame_t *clint_frame;         /* core-local interrupt registers */
    } retent;
#endif
} sleep_cpu_retention_t;

/* Kept in DRAM: accessed on the sleep/wake path where flash cache may be unavailable */
static DRAM_ATTR __attribute__((unused)) sleep_cpu_retention_t s_cpu_retention;
  70. #if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
  71. #if CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
/**
 * @brief Configure i/d-cache tag memory retention parameters for light sleep.
 *
 * For both caches, computes which tag-memory blocks cover the given code and
 * data segments and stores the result in s_cpu_retention, so only the needed
 * tag blocks are backed up / restored across power-down.
 *
 * @param code_seg_vaddr  virtual start address of the executable segment
 * @param code_seg_size   executable segment size in bytes
 * @param data_seg_vaddr  virtual start address of the data (rodata) segment
 * @param data_seg_size   data segment size in bytes
 * @return total number of bytes the RTC DMA must transfer for tag retention
 */
static uint32_t cache_tagmem_retention_setup(uint32_t code_seg_vaddr, uint32_t code_seg_size, uint32_t data_seg_vaddr, uint32_t data_seg_size)
{
    uint32_t sets;     /* i/d-cache total set counts */
    uint32_t index;    /* virtual address mapping i/d-cache row offset */
    uint32_t waysgrp;  /* ways grouped four at a time (ways >> 2) */
    uint32_t icache_tagmem_blk_gs, dcache_tagmem_blk_gs;
    struct cache_mode imode = { .icache = 1 };
    struct cache_mode dmode = { .icache = 0 };

    /* calculate/prepare i-cache tag memory retention parameters */
    Cache_Get_Mode(&imode);
    sets = imode.cache_size / imode.cache_ways / imode.cache_line_size;
    index = (code_seg_vaddr / imode.cache_line_size) % sets;
    waysgrp = imode.cache_ways >> 2;
    code_seg_size = ALIGNUP(imode.cache_line_size, code_seg_size);
    s_cpu_retention.retent.tagmem.icache.start_point = index;
    s_cpu_retention.retent.tagmem.icache.size = (sets * waysgrp) & 0xff;
    s_cpu_retention.retent.tagmem.icache.vld_size = s_cpu_retention.retent.tagmem.icache.size;
    if (code_seg_size < imode.cache_size / imode.cache_ways) {
        /* code fits in part of the cache: retain only the covering tag blocks */
        s_cpu_retention.retent.tagmem.icache.vld_size = (code_seg_size / imode.cache_line_size) * waysgrp;
    }
    s_cpu_retention.retent.tagmem.icache.enable = (code_seg_size != 0) ? 1 : 0;
    icache_tagmem_blk_gs = s_cpu_retention.retent.tagmem.icache.vld_size ? s_cpu_retention.retent.tagmem.icache.vld_size : sets * waysgrp;
    icache_tagmem_blk_gs = ALIGNUP(4, icache_tagmem_blk_gs);
    ESP_LOGD(TAG, "I-cache size:%d KiB, line size:%d B, ways:%d, sets:%d, index:%d, tag block groups:%d", (imode.cache_size>>10),
            imode.cache_line_size, imode.cache_ways, sets, index, icache_tagmem_blk_gs);

    /* calculate/prepare d-cache tag memory retention parameters */
    Cache_Get_Mode(&dmode);
    sets = dmode.cache_size / dmode.cache_ways / dmode.cache_line_size;
    index = (data_seg_vaddr / dmode.cache_line_size) % sets;
    waysgrp = dmode.cache_ways >> 2;
    data_seg_size = ALIGNUP(dmode.cache_line_size, data_seg_size);
    s_cpu_retention.retent.tagmem.dcache.start_point = index;
    s_cpu_retention.retent.tagmem.dcache.size = (sets * waysgrp) & 0x1ff;
    s_cpu_retention.retent.tagmem.dcache.vld_size = s_cpu_retention.retent.tagmem.dcache.size;
#ifndef CONFIG_ESP32S3_DATA_CACHE_16KB
    if (data_seg_size < dmode.cache_size / dmode.cache_ways) {
        s_cpu_retention.retent.tagmem.dcache.vld_size = (data_seg_size / dmode.cache_line_size) * waysgrp;
    }
    s_cpu_retention.retent.tagmem.dcache.enable = (data_seg_size != 0) ? 1 : 0;
#else
    /* 16 KiB d-cache config: always enable d-cache tagmem retention */
    s_cpu_retention.retent.tagmem.dcache.enable = 1;
#endif
    dcache_tagmem_blk_gs = s_cpu_retention.retent.tagmem.dcache.vld_size ? s_cpu_retention.retent.tagmem.dcache.vld_size : sets * waysgrp;
    dcache_tagmem_blk_gs = ALIGNUP(4, dcache_tagmem_blk_gs);
    ESP_LOGD(TAG, "D-cache size:%d KiB, line size:%d B, ways:%d, sets:%d, index:%d, tag block groups:%d", (dmode.cache_size>>10),
            dmode.cache_line_size, dmode.cache_ways, sets, index, dcache_tagmem_blk_gs);

    /* For I or D cache tagmem retention, backup and restore are performed through
     * RTC DMA (its bus width is 128 bits), For I/D Cache tagmem blocks (i-cache
     * tagmem blocks = 92 bits, d-cache tagmem blocks = 88 bits), RTC DMA automatically
     * aligns its bit width to 96 bits, therefore, 3 times RTC DMA can transfer 4
     * i/d-cache tagmem blocks (128 bits * 3 = 96 bits * 4) */
    return (((icache_tagmem_blk_gs + dcache_tagmem_blk_gs) << 2) * 3);
}
  125. #endif // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
/**
 * @brief Lazily allocate the RTC DMA backup memory for cache tag retention.
 *
 * Sizes the buffer via cache_tagmem_retention_setup() and builds the RTC DMA
 * link list.  When tagmem power-down is disabled in Kconfig, tag retention is
 * switched off instead.
 *
 * @return ESP_OK on success, ESP_ERR_NO_MEM if the backup buffer allocation fails
 */
static esp_err_t esp_sleep_tagmem_pd_low_init(void)
{
#if CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
    if (s_cpu_retention.retent.tagmem.link_addr == NULL) {
        /* segment bounds come from linker-script symbols */
        extern char _stext[], _etext[];
        uint32_t code_start = (uint32_t)_stext;
        uint32_t code_size = (uint32_t)(_etext - _stext);
#if !(CONFIG_SPIRAM && CONFIG_SOC_PM_SUPPORT_TAGMEM_PD)
        extern char _rodata_start[], _rodata_reserved_end[];
        uint32_t data_start = (uint32_t)_rodata_start;
        uint32_t data_size = (uint32_t)(_rodata_reserved_end - _rodata_start);
#else
        /* with SPIRAM, cover the whole external data address range */
        uint32_t data_start = SOC_DROM_LOW;
        uint32_t data_size = SOC_EXTRAM_DATA_SIZE;
#endif
        ESP_LOGI(TAG, "Code start at 0x%08"PRIx32", total %"PRIu32", data start at 0x%08"PRIx32", total %"PRIu32" Bytes",
                code_start, code_size, data_start, data_size);
        uint32_t tagmem_sz = cache_tagmem_retention_setup(code_start, code_size, data_start, data_size);
        /* one extra DMA link node is carved out of the front of the buffer */
        void *buf = heap_caps_aligned_calloc(SOC_RTC_CNTL_TAGMEM_PD_DMA_ADDR_ALIGN, 1,
                                             tagmem_sz + RTC_HAL_DMA_LINK_NODE_SIZE,
                                             MALLOC_CAP_RETENTION);
        if (buf) {
            s_cpu_retention.retent.tagmem.link_addr = rtc_cntl_hal_dma_link_init(buf,
                                  buf + RTC_HAL_DMA_LINK_NODE_SIZE, tagmem_sz, NULL);
        } else {
            /* allocation failed: disable tagmem retention entirely */
            s_cpu_retention.retent.tagmem.icache.enable = 0;
            s_cpu_retention.retent.tagmem.dcache.enable = 0;
            s_cpu_retention.retent.tagmem.link_addr = NULL;
            return ESP_ERR_NO_MEM;
        }
    }
#else // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
    s_cpu_retention.retent.tagmem.icache.enable = 0;
    s_cpu_retention.retent.tagmem.dcache.enable = 0;
    s_cpu_retention.retent.tagmem.link_addr = NULL;
#endif // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
    return ESP_OK;
}
  164. static esp_err_t esp_sleep_tagmem_pd_low_deinit(void)
  165. {
  166. #if SOC_PM_SUPPORT_TAGMEM_PD
  167. if (s_cpu_retention.retent.tagmem.link_addr) {
  168. heap_caps_free(s_cpu_retention.retent.tagmem.link_addr);
  169. s_cpu_retention.retent.tagmem.icache.enable = 0;
  170. s_cpu_retention.retent.tagmem.dcache.enable = 0;
  171. s_cpu_retention.retent.tagmem.link_addr = NULL;
  172. }
  173. #endif
  174. return ESP_OK;
  175. }
  176. #endif // SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
  177. #if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
/**
 * @brief Lazily allocate RTC DMA backup memory for CPU power-down retention.
 *
 * Allocates the CPU retention buffer (plus one DMA link node) and builds the
 * RTC DMA link list; then initializes tagmem retention where supported.
 *
 * @return ESP_OK on success, ESP_ERR_NO_MEM on allocation failure
 */
esp_err_t esp_sleep_cpu_pd_low_init(void)
{
    if (s_cpu_retention.retent.cpu_pd_mem == NULL) {
        void *buf = heap_caps_aligned_calloc(SOC_RTC_CNTL_CPU_PD_DMA_ADDR_ALIGN, 1,
                                             SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE + RTC_HAL_DMA_LINK_NODE_SIZE,
                                             MALLOC_CAP_RETENTION);
        if (buf) {
            /* first RTC_HAL_DMA_LINK_NODE_SIZE bytes hold the DMA link node */
            s_cpu_retention.retent.cpu_pd_mem = rtc_cntl_hal_dma_link_init(buf,
                                  buf + RTC_HAL_DMA_LINK_NODE_SIZE, SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE, NULL);
        } else {
            return ESP_ERR_NO_MEM;
        }
    }
#if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    if (esp_sleep_tagmem_pd_low_init() != ESP_OK) {
#ifdef CONFIG_ESP32S3_DATA_CACHE_16KB
        /* with the 16 KiB d-cache config tagmem retention is mandatory:
         * roll back the CPU retention memory and fail */
        esp_sleep_cpu_pd_low_deinit();
        return ESP_ERR_NO_MEM;
#endif
    }
#endif
    return ESP_OK;
}
/**
 * @brief Release the CPU power-down retention memory and the tagmem backup.
 *
 * @return ESP_OK on success, ESP_ERR_NO_MEM on tagmem deinit failure
 *         (16 KiB d-cache config only)
 */
esp_err_t esp_sleep_cpu_pd_low_deinit(void)
{
    if (s_cpu_retention.retent.cpu_pd_mem) {
        heap_caps_free(s_cpu_retention.retent.cpu_pd_mem);
        s_cpu_retention.retent.cpu_pd_mem = NULL;
    }
#if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    if (esp_sleep_tagmem_pd_low_deinit() != ESP_OK) {
#ifdef CONFIG_ESP32S3_DATA_CACHE_16KB
        /* NOTE(review): this recursive self-call mirrors the init-path pattern,
         * but esp_sleep_tagmem_pd_low_deinit() currently always returns ESP_OK,
         * so this branch looks unreachable — confirm intent */
        esp_sleep_cpu_pd_low_deinit();
        return ESP_ERR_NO_MEM;
#endif
    }
#endif
    return ESP_OK;
}
/**
 * @brief Arm RTC-controller-driven CPU (and tagmem, where supported)
 *        retention before entering light sleep.
 */
void sleep_enable_cpu_retention(void)
{
    rtc_cntl_hal_enable_cpu_retention(&s_cpu_retention.retent);
#if SOC_PM_SUPPORT_TAGMEM_PD
    rtc_cntl_hal_enable_tagmem_retention(&s_cpu_retention.retent);
#endif
}
/**
 * @brief Disarm CPU (and tagmem) retention after wakeup.
 *
 * Placed in IRAM: runs on the wake path where flash cache may not be ready.
 */
void IRAM_ATTR sleep_disable_cpu_retention(void)
{
    rtc_cntl_hal_disable_cpu_retention(&s_cpu_retention.retent);
#if SOC_PM_SUPPORT_TAGMEM_PD
    rtc_cntl_hal_disable_tagmem_retention(&s_cpu_retention.retent);
#endif
}
  231. #endif
  232. #if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
/* Vendor-defined (custom) CSR addresses saved/restored across sleep.
 * 0x7ex = machine-mode custom CSRs, 0x80x = user-mode custom CSRs,
 * 0x7fx = co-processor related CSRs. */
#define CUSTOM_CSR_PCER_MACHINE         0x7e0   /* perf counter event enable (M-mode) */
#define CUSTOM_CSR_PCMR_MACHINE         0x7e1   /* perf counter mode (M-mode) */
#define CUSTOM_CSR_PCCR_MACHINE         0x7e2   /* perf counter count (M-mode) */
#define CUSTOM_CSR_CPU_TESTBUS_CTRL     0x7e3
#define CUSTOM_CSR_PCER_USER            0x800   /* perf counter event enable (U-mode) */
#define CUSTOM_CSR_PCMR_USER            0x801
#define CUSTOM_CSR_PCCR_USER            0x802
#define CUSTOM_CSR_GPIO_OEN_USER        0x803   /* fast-GPIO output enable */
#define CUSTOM_CSR_GPIO_IN_USER         0x804   /* fast-GPIO input */
#define CUSTOM_CSR_GPIO_OUT_USER        0x805   /* fast-GPIO output */
#define CUSTOM_CSR_CO_EXCEPTION_CAUSE   0x7f0
#define CUSTOM_CSR_CO_HWLP              0x7f1
#define CUSTOM_CSR_CO_AIA               0x7f2

/* Critical register frame shared with the assembly save/restore code
 * (defined outside this file) */
extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame;
  247. static void * cpu_domain_dev_sleep_frame_alloc_and_init(const cpu_domain_dev_regs_region_t *regions, const int region_num)
  248. {
  249. const int region_sz = sizeof(cpu_domain_dev_regs_region_t) * region_num;
  250. int regs_frame_sz = 0;
  251. for (int num = 0; num < region_num; num++) {
  252. regs_frame_sz += regions[num].end - regions[num].start;
  253. }
  254. void *frame = heap_caps_malloc(sizeof(cpu_domain_dev_sleep_frame_t) + region_sz + regs_frame_sz, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
  255. if (frame) {
  256. cpu_domain_dev_regs_region_t *region = (cpu_domain_dev_regs_region_t *)(frame + sizeof(cpu_domain_dev_sleep_frame_t));
  257. memcpy(region, regions, region_num * sizeof(cpu_domain_dev_regs_region_t));
  258. void *regs_frame = frame + sizeof(cpu_domain_dev_sleep_frame_t) + region_sz;
  259. memset(regs_frame, 0, regs_frame_sz);
  260. *(cpu_domain_dev_sleep_frame_t *)frame = (cpu_domain_dev_sleep_frame_t) {
  261. .region = region,
  262. .region_num = region_num,
  263. .regs_frame = (uint32_t *)regs_frame
  264. };
  265. }
  266. return frame;
  267. }
  268. static inline void * cpu_domain_intpri_sleep_frame_alloc_and_init(void)
  269. {
  270. const static cpu_domain_dev_regs_region_t regions[] = {
  271. { .start = INTPRI_CORE0_CPU_INT_ENABLE_REG, .end = INTPRI_RND_ECO_LOW_REG + 4 },
  272. { .start = INTPRI_RND_ECO_HIGH_REG, .end = INTPRI_RND_ECO_HIGH_REG + 4 }
  273. };
  274. return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
  275. }
  276. static inline void * cpu_domain_cache_config_sleep_frame_alloc_and_init(void)
  277. {
  278. const static cpu_domain_dev_regs_region_t regions[] = {
  279. { .start = EXTMEM_L1_CACHE_CTRL_REG, .end = EXTMEM_L1_CACHE_CTRL_REG + 4 },
  280. { .start = EXTMEM_L1_CACHE_WRAP_AROUND_CTRL_REG, .end = EXTMEM_L1_CACHE_WRAP_AROUND_CTRL_REG + 4 }
  281. };
  282. return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
  283. }
  284. static inline void * cpu_domain_plic_sleep_frame_alloc_and_init(void)
  285. {
  286. const static cpu_domain_dev_regs_region_t regions[] = {
  287. { .start = PLIC_MXINT_ENABLE_REG, .end = PLIC_MXINT_CLAIM_REG + 4 },
  288. { .start = PLIC_MXINT_CONF_REG, .end = PLIC_MXINT_CONF_REG + 4 },
  289. { .start = PLIC_UXINT_ENABLE_REG, .end = PLIC_UXINT_CLAIM_REG + 4 },
  290. { .start = PLIC_UXINT_CONF_REG, .end = PLIC_UXINT_CONF_REG + 4 }
  291. };
  292. return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
  293. }
  294. static inline void * cpu_domain_clint_sleep_frame_alloc_and_init(void)
  295. {
  296. const static cpu_domain_dev_regs_region_t regions[] = {
  297. { .start = CLINT_MINT_SIP_REG, .end = CLINT_MINT_MTIMECMP_H_REG + 4 },
  298. { .start = CLINT_UINT_SIP_REG, .end = CLINT_UINT_UTIMECMP_H_REG + 4 }
  299. };
  300. return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
  301. }
  302. static esp_err_t esp_sleep_cpu_retention_init_impl(void)
  303. {
  304. if (s_cpu_retention.retent.critical_frame == NULL) {
  305. void *frame = heap_caps_calloc(1, RV_SLEEP_CTX_FRMSZ, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
  306. if (frame == NULL) {
  307. goto err;
  308. }
  309. s_cpu_retention.retent.critical_frame = (RvCoreCriticalSleepFrame *)frame;
  310. rv_core_critical_regs_frame = (RvCoreCriticalSleepFrame *)frame;
  311. }
  312. if (s_cpu_retention.retent.non_critical_frame == NULL) {
  313. void *frame = heap_caps_calloc(1, sizeof(RvCoreNonCriticalSleepFrame), MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
  314. if (frame == NULL) {
  315. goto err;
  316. }
  317. s_cpu_retention.retent.non_critical_frame = (RvCoreNonCriticalSleepFrame *)frame;
  318. }
  319. if (s_cpu_retention.retent.intpri_frame == NULL) {
  320. void *frame = cpu_domain_intpri_sleep_frame_alloc_and_init();
  321. if (frame == NULL) {
  322. goto err;
  323. }
  324. s_cpu_retention.retent.intpri_frame = (cpu_domain_dev_sleep_frame_t *)frame;
  325. }
  326. if (s_cpu_retention.retent.cache_config_frame == NULL) {
  327. void *frame = cpu_domain_cache_config_sleep_frame_alloc_and_init();
  328. if (frame == NULL) {
  329. goto err;
  330. }
  331. s_cpu_retention.retent.cache_config_frame = (cpu_domain_dev_sleep_frame_t *)frame;
  332. }
  333. if (s_cpu_retention.retent.plic_frame == NULL) {
  334. void *frame = cpu_domain_plic_sleep_frame_alloc_and_init();
  335. if (frame == NULL) {
  336. goto err;
  337. }
  338. s_cpu_retention.retent.plic_frame = (cpu_domain_dev_sleep_frame_t *)frame;
  339. }
  340. if (s_cpu_retention.retent.clint_frame == NULL) {
  341. void *frame = cpu_domain_clint_sleep_frame_alloc_and_init();
  342. if (frame == NULL) {
  343. goto err;
  344. }
  345. s_cpu_retention.retent.clint_frame = (cpu_domain_dev_sleep_frame_t *)frame;
  346. }
  347. return ESP_OK;
  348. err:
  349. esp_sleep_cpu_retention_deinit();
  350. return ESP_ERR_NO_MEM;
  351. }
  352. static esp_err_t esp_sleep_cpu_retention_deinit_impl(void)
  353. {
  354. if (s_cpu_retention.retent.critical_frame) {
  355. heap_caps_free((void *)s_cpu_retention.retent.critical_frame);
  356. s_cpu_retention.retent.critical_frame = NULL;
  357. rv_core_critical_regs_frame = NULL;
  358. }
  359. if (s_cpu_retention.retent.non_critical_frame) {
  360. heap_caps_free((void *)s_cpu_retention.retent.non_critical_frame);
  361. s_cpu_retention.retent.non_critical_frame = NULL;
  362. }
  363. if (s_cpu_retention.retent.intpri_frame) {
  364. heap_caps_free((void *)s_cpu_retention.retent.intpri_frame);
  365. s_cpu_retention.retent.intpri_frame = NULL;
  366. }
  367. if (s_cpu_retention.retent.cache_config_frame) {
  368. heap_caps_free((void *)s_cpu_retention.retent.cache_config_frame);
  369. s_cpu_retention.retent.cache_config_frame = NULL;
  370. }
  371. if (s_cpu_retention.retent.plic_frame) {
  372. heap_caps_free((void *)s_cpu_retention.retent.plic_frame);
  373. s_cpu_retention.retent.plic_frame = NULL;
  374. }
  375. if (s_cpu_retention.retent.clint_frame) {
  376. heap_caps_free((void *)s_cpu_retention.retent.clint_frame);
  377. s_cpu_retention.retent.clint_frame = NULL;
  378. }
  379. return ESP_OK;
  380. }
/**
 * @brief Disable machine-level global interrupts.
 *
 * Reads mstatus, then clears mstatus.MIE (bit 3, immediate 0x8 via csrci).
 *
 * @return the previous mstatus value, for restore_mstatus()
 */
static inline IRAM_ATTR uint32_t save_mstatus_and_disable_global_int(void)
{
    uint32_t mstatus;
    __asm__ __volatile__ (
            "csrr %0, mstatus\n"
            "csrci mstatus, 0x8\n"
            : "=r"(mstatus)
    );
    return mstatus;
}
/**
 * @brief Restore a previously saved mstatus value (re-enables interrupts
 *        if they were enabled at save time).
 *
 * @param mstatus value returned by save_mstatus_and_disable_global_int()
 */
static inline IRAM_ATTR void restore_mstatus(uint32_t mstatus)
{
    __asm__ __volatile__ ("csrw mstatus, %0\n" :: "r"(mstatus));
}
/**
 * @brief Save the non-critical RISC-V core CSRs into the retention frame.
 *
 * Covers standard CSRs (trigger, PMP, user-mode trap state) and the
 * ESP32-C6 custom CSRs.  Runs from IRAM on the sleep path.
 *
 * @return the filled non-critical frame (asserts it was allocated)
 */
static IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void)
{
    assert(s_cpu_retention.retent.non_critical_frame);
    RvCoreNonCriticalSleepFrame *frame = s_cpu_retention.retent.non_critical_frame;
    frame->mscratch  = RV_READ_CSR(mscratch);
    frame->mideleg   = RV_READ_CSR(mideleg);
    frame->misa      = RV_READ_CSR(misa);
    /* debug/trigger module state */
    frame->tselect   = RV_READ_CSR(tselect);
    frame->tdata1    = RV_READ_CSR(tdata1);
    frame->tdata2    = RV_READ_CSR(tdata2);
    frame->tcontrol  = RV_READ_CSR(tcontrol);
    /* physical memory protection configuration and addresses */
    frame->pmpcfg0   = RV_READ_CSR(pmpcfg0);
    frame->pmpcfg1   = RV_READ_CSR(pmpcfg1);
    frame->pmpcfg2   = RV_READ_CSR(pmpcfg2);
    frame->pmpcfg3   = RV_READ_CSR(pmpcfg3);
    frame->pmpaddr0  = RV_READ_CSR(pmpaddr0);
    frame->pmpaddr1  = RV_READ_CSR(pmpaddr1);
    frame->pmpaddr2  = RV_READ_CSR(pmpaddr2);
    frame->pmpaddr3  = RV_READ_CSR(pmpaddr3);
    frame->pmpaddr4  = RV_READ_CSR(pmpaddr4);
    frame->pmpaddr5  = RV_READ_CSR(pmpaddr5);
    frame->pmpaddr6  = RV_READ_CSR(pmpaddr6);
    frame->pmpaddr7  = RV_READ_CSR(pmpaddr7);
    frame->pmpaddr8  = RV_READ_CSR(pmpaddr8);
    frame->pmpaddr9  = RV_READ_CSR(pmpaddr9);
    frame->pmpaddr10 = RV_READ_CSR(pmpaddr10);
    frame->pmpaddr11 = RV_READ_CSR(pmpaddr11);
    frame->pmpaddr12 = RV_READ_CSR(pmpaddr12);
    frame->pmpaddr13 = RV_READ_CSR(pmpaddr13);
    frame->pmpaddr14 = RV_READ_CSR(pmpaddr14);
    frame->pmpaddr15 = RV_READ_CSR(pmpaddr15);
    /* user-mode trap handling state */
    frame->utvec     = RV_READ_CSR(utvec);
    frame->ustatus   = RV_READ_CSR(ustatus);
    frame->uepc      = RV_READ_CSR(uepc);
    frame->ucause    = RV_READ_CSR(ucause);
    /* vendor-defined CSRs (see CUSTOM_CSR_* above) */
    frame->mpcer     = RV_READ_CSR(CUSTOM_CSR_PCER_MACHINE);
    frame->mpcmr     = RV_READ_CSR(CUSTOM_CSR_PCMR_MACHINE);
    frame->mpccr     = RV_READ_CSR(CUSTOM_CSR_PCCR_MACHINE);
    frame->cpu_testbus_ctrl = RV_READ_CSR(CUSTOM_CSR_CPU_TESTBUS_CTRL);
    frame->upcer     = RV_READ_CSR(CUSTOM_CSR_PCER_USER);
    frame->upcmr     = RV_READ_CSR(CUSTOM_CSR_PCMR_USER);
    frame->upccr     = RV_READ_CSR(CUSTOM_CSR_PCCR_USER);
    frame->ugpio_oen = RV_READ_CSR(CUSTOM_CSR_GPIO_OEN_USER);
    frame->ugpio_in  = RV_READ_CSR(CUSTOM_CSR_GPIO_IN_USER);
    frame->ugpio_out = RV_READ_CSR(CUSTOM_CSR_GPIO_OUT_USER);
    return frame;
}
/**
 * @brief Restore the non-critical RISC-V core CSRs from the retention frame.
 *
 * Mirrors rv_core_noncritical_regs_save().  Runs from IRAM on the wake path.
 *
 * @param frame frame previously filled by rv_core_noncritical_regs_save()
 */
static IRAM_ATTR void rv_core_noncritical_regs_restore(RvCoreNonCriticalSleepFrame *frame)
{
    assert(frame);
    RV_WRITE_CSR(mscratch, frame->mscratch);
    RV_WRITE_CSR(mideleg, frame->mideleg);
    RV_WRITE_CSR(misa, frame->misa);
    /* debug/trigger module state */
    RV_WRITE_CSR(tselect, frame->tselect);
    RV_WRITE_CSR(tdata1, frame->tdata1);
    RV_WRITE_CSR(tdata2, frame->tdata2);
    RV_WRITE_CSR(tcontrol, frame->tcontrol);
    /* physical memory protection configuration and addresses */
    RV_WRITE_CSR(pmpcfg0, frame->pmpcfg0);
    RV_WRITE_CSR(pmpcfg1, frame->pmpcfg1);
    RV_WRITE_CSR(pmpcfg2, frame->pmpcfg2);
    RV_WRITE_CSR(pmpcfg3, frame->pmpcfg3);
    RV_WRITE_CSR(pmpaddr0, frame->pmpaddr0);
    RV_WRITE_CSR(pmpaddr1, frame->pmpaddr1);
    RV_WRITE_CSR(pmpaddr2, frame->pmpaddr2);
    RV_WRITE_CSR(pmpaddr3, frame->pmpaddr3);
    RV_WRITE_CSR(pmpaddr4, frame->pmpaddr4);
    RV_WRITE_CSR(pmpaddr5, frame->pmpaddr5);
    RV_WRITE_CSR(pmpaddr6, frame->pmpaddr6);
    RV_WRITE_CSR(pmpaddr7, frame->pmpaddr7);
    RV_WRITE_CSR(pmpaddr8, frame->pmpaddr8);
    RV_WRITE_CSR(pmpaddr9, frame->pmpaddr9);
    RV_WRITE_CSR(pmpaddr10,frame->pmpaddr10);
    RV_WRITE_CSR(pmpaddr11,frame->pmpaddr11);
    RV_WRITE_CSR(pmpaddr12,frame->pmpaddr12);
    RV_WRITE_CSR(pmpaddr13,frame->pmpaddr13);
    RV_WRITE_CSR(pmpaddr14,frame->pmpaddr14);
    RV_WRITE_CSR(pmpaddr15,frame->pmpaddr15);
    /* user-mode trap handling state */
    RV_WRITE_CSR(utvec, frame->utvec);
    RV_WRITE_CSR(ustatus, frame->ustatus);
    RV_WRITE_CSR(uepc, frame->uepc);
    RV_WRITE_CSR(ucause, frame->ucause);
    /* vendor-defined CSRs (see CUSTOM_CSR_* above) */
    RV_WRITE_CSR(CUSTOM_CSR_PCER_MACHINE, frame->mpcer);
    RV_WRITE_CSR(CUSTOM_CSR_PCMR_MACHINE, frame->mpcmr);
    RV_WRITE_CSR(CUSTOM_CSR_PCCR_MACHINE, frame->mpccr);
    RV_WRITE_CSR(CUSTOM_CSR_CPU_TESTBUS_CTRL, frame->cpu_testbus_ctrl);
    RV_WRITE_CSR(CUSTOM_CSR_PCER_USER, frame->upcer);
    RV_WRITE_CSR(CUSTOM_CSR_PCMR_USER, frame->upcmr);
    RV_WRITE_CSR(CUSTOM_CSR_PCCR_USER, frame->upccr);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_OEN_USER,frame->ugpio_oen);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_IN_USER, frame->ugpio_in);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_OUT_USER,frame->ugpio_out);
}
  487. static IRAM_ATTR void cpu_domain_dev_regs_save(cpu_domain_dev_sleep_frame_t *frame)
  488. {
  489. assert(frame);
  490. cpu_domain_dev_regs_region_t *region = frame->region;
  491. uint32_t *regs_frame = frame->regs_frame;
  492. int offset = 0;
  493. for (int i = 0; i < frame->region_num; i++) {
  494. for (uint32_t addr = region[i].start; addr < region[i].end; addr+=4) {
  495. regs_frame[offset++] = *(uint32_t *)addr;
  496. }
  497. }
  498. }
  499. static IRAM_ATTR void cpu_domain_dev_regs_restore(cpu_domain_dev_sleep_frame_t *frame)
  500. {
  501. assert(frame);
  502. cpu_domain_dev_regs_region_t *region = frame->region;
  503. uint32_t *regs_frame = frame->regs_frame;
  504. int offset = 0;
  505. for (int i = 0; i < frame->region_num; i++) {
  506. for (uint32_t addr = region[i].start; addr < region[i].end; addr+=4) {
  507. *(uint32_t *)addr = regs_frame[offset++];
  508. }
  509. }
  510. }
  511. #if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
  512. static void update_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr)
  513. {
  514. *(frame_crc_ptr) = esp_crc32_le(0, (void *)frame_ptr, frame_check_size);
  515. }
/**
 * @brief Verify a retention frame's CRC after wakeup; reset on mismatch.
 *
 * A mismatch means the retention frame was corrupted while the CPU domain
 * was powered down — the system state is unrecoverable, so the chip is
 * restarted after un-flow-controlling the UARTs so the error can be printed.
 *
 * @param frame_ptr        start of the frame data to checksum
 * @param frame_check_size number of bytes covered by the CRC
 * @param frame_crc_ptr    location of the expected CRC value
 */
static void validate_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr)
{
    if(*(frame_crc_ptr) != esp_crc32_le(0, (void *)(frame_ptr), frame_check_size)){
        // resume uarts
        for (int i = 0; i < SOC_UART_NUM; ++i) {
#ifndef CONFIG_IDF_TARGET_ESP32
            /* skip UARTs whose peripheral clock is gated off */
            if (!periph_ll_periph_enabled(PERIPH_UART0_MODULE + i)) {
                continue;
            }
#endif
            uart_ll_force_xon(i);
        }
        /* Since it is still in the critical now, use ESP_EARLY_LOG */
        ESP_EARLY_LOGE(TAG, "Sleep retention frame is corrupted");
        esp_restart_noos();
    }
}
  533. #endif
/* Assembly routines (defined outside this file) that save/restore the
 * critical core registers around power-down */
extern RvCoreCriticalSleepFrame * rv_core_critical_regs_save(void);
extern RvCoreCriticalSleepFrame * rv_core_critical_regs_restore(void);
typedef uint32_t (* sleep_cpu_entry_cb_t)(uint32_t, uint32_t, uint32_t, bool);

/**
 * @brief Save the critical core registers and enter retention sleep.
 *
 * rv_core_critical_regs_save() is effectively "returns twice": execution
 * passes through here once on the way into sleep and again after
 * rv_core_critical_regs_restore() runs from the wake stub.  The two paths
 * are distinguished by the low bits of frame->pmufunc (0x1 = sleep entry).
 *
 * @return result of the sleep callback on the entry path, ESP_OK on wakeup
 */
static IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
        uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
    RvCoreCriticalSleepFrame * frame = rv_core_critical_regs_save();
    if ((frame->pmufunc & 0x3) == 0x1) {
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
        /* Minus 2 * sizeof(long) is for bypass `pmufunc` and `frame_crc` field */
        update_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif
        /* wake stub jumps straight into the assembly restore routine */
        REG_WRITE(LIGHT_SLEEP_WAKE_STUB_ADDR_REG, (uint32_t)rv_core_critical_regs_restore);
        return (*goto_sleep)(wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);
    }
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    else {
        /* wake path: verify the critical frame survived power-down intact */
        validate_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc));
    }
#endif
    return ESP_OK;
}
/**
 * @brief Enter sleep with full software CPU retention.
 *
 * Disables global interrupts, saves the CPU-domain peripheral registers
 * (PLIC, CLINT, INTPRI, cache config) and the non-critical core CSRs,
 * sleeps via do_cpu_retention(), then restores everything in reverse
 * order on wakeup.
 *
 * @param goto_sleep       callback that performs the actual sleep entry
 * @param wakeup_opt       wakeup option bits forwarded to the callback
 * @param reject_opt       sleep-reject option bits forwarded to the callback
 * @param lslp_mem_inf_fpu light-sleep memory/FPU option forwarded to the callback
 * @param dslp             true for deep sleep
 * @return result of the retention sleep sequence
 */
esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uint32_t, uint32_t, bool),
        uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
    uint32_t mstatus = save_mstatus_and_disable_global_int();
    /* wait cache idle */
    Cache_Freeze_ICache_Enable(CACHE_FREEZE_ACK_BUSY);
    Cache_Freeze_ICache_Disable();
    cpu_domain_dev_regs_save(s_cpu_retention.retent.plic_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.clint_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.intpri_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.cache_config_frame);
    RvCoreNonCriticalSleepFrame *frame = rv_core_noncritical_regs_save();
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    /* Minus sizeof(long) is for bypass `frame_crc` field */
    update_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif
    esp_err_t err = do_cpu_retention(goto_sleep, wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    /* verify the non-critical frame survived power-down intact */
    validate_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif
    /* restore in reverse order of the save sequence above */
    rv_core_noncritical_regs_restore(frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.cache_config_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.intpri_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.clint_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.plic_frame);
    restore_mstatus(mstatus);
    return err;
}
  584. #endif // SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
  585. #if SOC_PM_SUPPORT_CPU_PD
  586. esp_err_t esp_sleep_cpu_retention_init(void)
  587. {
  588. esp_err_t err = ESP_OK;
  589. #if SOC_PM_CPU_RETENTION_BY_RTCCNTL
  590. err = esp_sleep_cpu_pd_low_init();
  591. #elif SOC_PM_CPU_RETENTION_BY_SW
  592. err = esp_sleep_cpu_retention_init_impl();
  593. #endif
  594. return err;
  595. }
  596. esp_err_t esp_sleep_cpu_retention_deinit(void)
  597. {
  598. esp_err_t err = ESP_OK;
  599. #if SOC_PM_CPU_RETENTION_BY_RTCCNTL
  600. err = esp_sleep_cpu_pd_low_deinit();
  601. #elif SOC_PM_CPU_RETENTION_BY_SW
  602. err = esp_sleep_cpu_retention_deinit_impl();
  603. #endif
  604. return err;
  605. }
  606. bool cpu_domain_pd_allowed(void)
  607. {
  608. #if SOC_PM_CPU_RETENTION_BY_RTCCNTL
  609. return (s_cpu_retention.retent.cpu_pd_mem != NULL);
  610. #elif SOC_PM_CPU_RETENTION_BY_SW
  611. return (s_cpu_retention.retent.critical_frame != NULL) && \
  612. (s_cpu_retention.retent.non_critical_frame != NULL) && \
  613. (s_cpu_retention.retent.intpri_frame != NULL) && \
  614. (s_cpu_retention.retent.cache_config_frame != NULL) && \
  615. (s_cpu_retention.retent.plic_frame != NULL) && \
  616. (s_cpu_retention.retent.clint_frame != NULL);
  617. #else
  618. return false;
  619. #endif
  620. }
  621. esp_err_t sleep_cpu_configure(bool light_sleep_enable)
  622. {
  623. #if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP
  624. if (light_sleep_enable) {
  625. ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_init(), TAG, "Failed to enable CPU power down during light sleep.");
  626. } else {
  627. ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_deinit(), TAG, "Failed to release CPU retention memory");
  628. }
  629. #endif
  630. return ESP_OK;
  631. }
  632. #endif