/* sleep_cpu.c */
  1. /*
  2. * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <stddef.h>
  7. #include <string.h>
  8. #include <inttypes.h>
  9. #include <sys/lock.h>
  10. #include <sys/param.h>
  11. #include "esp_attr.h"
  12. #include "esp_check.h"
  13. #include "esp_sleep.h"
  14. #include "esp_log.h"
  15. #include "esp_crc.h"
  16. #include "freertos/FreeRTOS.h"
  17. #include "freertos/task.h"
  18. #include "esp_heap_caps.h"
  19. #include "soc/soc_caps.h"
  20. #include "esp_private/sleep_cpu.h"
  21. #include "esp_private/sleep_event.h"
  22. #include "sdkconfig.h"
  23. #if SOC_PMU_SUPPORTED
  24. #include "esp_private/esp_pmu.h"
  25. #else
  26. #include "hal/rtc_hal.h"
  27. #endif
  28. #if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
  29. #include "esp_private/system_internal.h"
  30. #include "hal/clk_gate_ll.h"
  31. #include "hal/uart_hal.h"
  32. #endif
  33. #include "soc/rtc_periph.h"
  34. #ifdef CONFIG_IDF_TARGET_ESP32S3
  35. #include "esp32s3/rom/cache.h"
  36. #elif CONFIG_IDF_TARGET_ESP32C6
  37. #include "esp32c6/rom/rtc.h"
  38. #include "riscv/rvsleep-frames.h"
  39. #include "soc/intpri_reg.h"
  40. #include "soc/extmem_reg.h"
  41. #include "soc/plic_reg.h"
  42. #include "soc/clint_reg.h"
  43. #include "esp32c6/rom/cache.h"
  44. #elif CONFIG_IDF_TARGET_ESP32H2
  45. #include "esp32h2/rom/rtc.h"
  46. #include "riscv/rvsleep-frames.h"
  47. #include "soc/intpri_reg.h"
  48. #include "soc/extmem_reg.h"
  49. #include "soc/plic_reg.h"
  50. #include "soc/clint_reg.h"
  51. #include "esp32h2/rom/cache.h"
  52. #endif
/* Log tag; unused on targets where all ESP_LOG* calls compile out */
static __attribute__((unused)) const char *TAG = "sleep";

/* A contiguous range of 32-bit peripheral registers to retain across sleep. */
typedef struct {
    uint32_t start; /* first register address (inclusive) */
    uint32_t end;   /* one past the last register address (exclusive) */
} cpu_domain_dev_regs_region_t;

/* Backup frame for one CPU-domain peripheral: the region table plus a word
 * of storage for every 4 bytes of register space described by the regions. */
typedef struct {
    cpu_domain_dev_regs_region_t *region; /* array of register regions */
    int region_num;                       /* number of entries in `region` */
    uint32_t *regs_frame;                 /* packed backup storage */
} cpu_domain_dev_sleep_frame_t;
/**
 * Internal structure which holds all requested light sleep cpu retention parameters
 */
typedef struct {
#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    /* Retention done by the RTC controller's DMA engine */
    rtc_cntl_sleep_retent_t retent;
#elif SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
    /* Retention done in software: core register frames plus backup frames
     * for each CPU-domain peripheral saved/restored around sleep */
    struct {
        RvCoreCriticalSleepFrame *critical_frame;         /* core context saved by rv_core_critical_regs_save() (defined elsewhere) */
        RvCoreNonCriticalSleepFrame *non_critical_frame;  /* CSRs saved from C code, see rv_core_noncritical_regs_save() */
        cpu_domain_dev_sleep_frame_t *intpri_frame;       /* interrupt priority (INTPRI) registers */
        cpu_domain_dev_sleep_frame_t *cache_config_frame; /* cache configuration registers */
        cpu_domain_dev_sleep_frame_t *plic_frame;         /* PLIC interrupt controller registers */
        cpu_domain_dev_sleep_frame_t *clint_frame;        /* CLINT timer/software-interrupt registers */
    } retent;
#endif
} sleep_cpu_retention_t;

/* Kept in DRAM so it remains accessible in IRAM_ATTR sleep paths */
static DRAM_ATTR __attribute__((unused)) sleep_cpu_retention_t s_cpu_retention;
  81. #if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
  82. #if CONFIG_PM_RESTORE_CACHE_TAGMEM_AFTER_LIGHT_SLEEP
/**
 * Compute and program the i/d-cache tag memory retention parameters for the
 * given code and data segments.
 *
 * Fills s_cpu_retention.retent.tagmem.{icache,dcache} (start point, group
 * size, valid size, enable) based on the cache geometry reported by
 * Cache_Get_Mode().
 *
 * @param code_seg_vaddr  start virtual address of the code segment
 * @param code_seg_size   code segment size in bytes
 * @param data_seg_vaddr  start virtual address of the data segment
 * @param data_seg_size   data segment size in bytes
 * @return total number of tag memory bytes that must be backed up by RTC DMA
 */
static uint32_t cache_tagmem_retention_setup(uint32_t code_seg_vaddr, uint32_t code_seg_size, uint32_t data_seg_vaddr, uint32_t data_seg_size)
{
    uint32_t sets;   /* i/d-cache total set counts */
    uint32_t index;  /* virtual address mapping i/d-cache row offset */
    uint32_t waysgrp;
    uint32_t icache_tagmem_blk_gs, dcache_tagmem_blk_gs;
    struct cache_mode imode = { .icache = 1 };
    struct cache_mode dmode = { .icache = 0 };

    /* calculate/prepare i-cache tag memory retention parameters */
    Cache_Get_Mode(&imode);
    sets = imode.cache_size / imode.cache_ways / imode.cache_line_size;
    index = (code_seg_vaddr / imode.cache_line_size) % sets;
    waysgrp = imode.cache_ways >> 2; /* ways are grouped 4 at a time */
    code_seg_size = ALIGNUP(imode.cache_line_size, code_seg_size);
    s_cpu_retention.retent.tagmem.icache.start_point = index;
    s_cpu_retention.retent.tagmem.icache.size = (sets * waysgrp) & 0xff;
    s_cpu_retention.retent.tagmem.icache.vld_size = s_cpu_retention.retent.tagmem.icache.size;
    /* if the code segment does not fill a whole way, only retain the used part */
    if (code_seg_size < imode.cache_size / imode.cache_ways) {
        s_cpu_retention.retent.tagmem.icache.vld_size = (code_seg_size / imode.cache_line_size) * waysgrp;
    }
    s_cpu_retention.retent.tagmem.icache.enable = (code_seg_size != 0) ? 1 : 0;
    icache_tagmem_blk_gs = s_cpu_retention.retent.tagmem.icache.vld_size ? s_cpu_retention.retent.tagmem.icache.vld_size : sets * waysgrp;
    icache_tagmem_blk_gs = ALIGNUP(4, icache_tagmem_blk_gs);
    ESP_LOGD(TAG, "I-cache size:%"PRIu32" KiB, line size:%d B, ways:%d, sets:%"PRIu32", index:%"PRIu32", tag block groups:%"PRIu32"", (imode.cache_size>>10),
             imode.cache_line_size, imode.cache_ways, sets, index, icache_tagmem_blk_gs);

    /* calculate/prepare d-cache tag memory retention parameters */
    Cache_Get_Mode(&dmode);
    sets = dmode.cache_size / dmode.cache_ways / dmode.cache_line_size;
    index = (data_seg_vaddr / dmode.cache_line_size) % sets;
    waysgrp = dmode.cache_ways >> 2;
    data_seg_size = ALIGNUP(dmode.cache_line_size, data_seg_size);
    s_cpu_retention.retent.tagmem.dcache.start_point = index;
    s_cpu_retention.retent.tagmem.dcache.size = (sets * waysgrp) & 0x1ff;
    s_cpu_retention.retent.tagmem.dcache.vld_size = s_cpu_retention.retent.tagmem.dcache.size;
#ifndef CONFIG_ESP32S3_DATA_CACHE_16KB
    if (data_seg_size < dmode.cache_size / dmode.cache_ways) {
        s_cpu_retention.retent.tagmem.dcache.vld_size = (data_seg_size / dmode.cache_line_size) * waysgrp;
    }
    s_cpu_retention.retent.tagmem.dcache.enable = (data_seg_size != 0) ? 1 : 0;
#else
    /* with the 16 KB d-cache configuration, d-cache tagmem retention is mandatory */
    s_cpu_retention.retent.tagmem.dcache.enable = 1;
#endif
    dcache_tagmem_blk_gs = s_cpu_retention.retent.tagmem.dcache.vld_size ? s_cpu_retention.retent.tagmem.dcache.vld_size : sets * waysgrp;
    dcache_tagmem_blk_gs = ALIGNUP(4, dcache_tagmem_blk_gs);
    ESP_LOGD(TAG, "D-cache size:%"PRIu32" KiB, line size:%d B, ways:%d, sets:%"PRIu32", index:%"PRIu32", tag block groups:%"PRIu32"", (dmode.cache_size>>10),
             dmode.cache_line_size, dmode.cache_ways, sets, index, dcache_tagmem_blk_gs);

    /* For I or D cache tagmem retention, backup and restore are performed through
     * RTC DMA (its bus width is 128 bits), For I/D Cache tagmem blocks (i-cache
     * tagmem blocks = 92 bits, d-cache tagmem blocks = 88 bits), RTC DMA automatically
     * aligns its bit width to 96 bits, therefore, 3 times RTC DMA can transfer 4
     * i/d-cache tagmem blocks (128 bits * 3 = 96 bits * 4) */
    return (((icache_tagmem_blk_gs + dcache_tagmem_blk_gs) << 2) * 3);
}
  136. #endif // CONFIG_PM_RESTORE_CACHE_TAGMEM_AFTER_LIGHT_SLEEP
/**
 * Allocate and initialize the RTC DMA link list used to back up the cache
 * tag memory during light sleep.
 *
 * Derives the tag memory size to retain from the application's code and
 * read-only data segments, allocates a retention buffer plus one DMA link
 * node from retention-capable heap, and records the resulting link address
 * in s_cpu_retention.retent.tagmem.link_addr.
 *
 * @return ESP_OK on success (or when tagmem restore is disabled by config);
 *         ESP_ERR_NO_MEM if the retention buffer cannot be allocated — in
 *         that case tagmem retention is left disabled.
 */
static esp_err_t esp_sleep_tagmem_pd_low_init(void)
{
#if CONFIG_PM_RESTORE_CACHE_TAGMEM_AFTER_LIGHT_SLEEP
    if (s_cpu_retention.retent.tagmem.link_addr == NULL) {
        /* linker-provided segment boundary symbols */
        extern char _stext[], _etext[];
        uint32_t code_start = (uint32_t)_stext;
        uint32_t code_size = (uint32_t)(_etext - _stext);
#if !(CONFIG_SPIRAM && CONFIG_SOC_PM_SUPPORT_TAGMEM_PD)
        extern char _rodata_start[], _rodata_reserved_end[];
        uint32_t data_start = (uint32_t)_rodata_start;
        uint32_t data_size = (uint32_t)(_rodata_reserved_end - _rodata_start);
#else
        /* with SPIRAM, cover the whole external data address range */
        uint32_t data_start = SOC_DROM_LOW;
        uint32_t data_size = SOC_EXTRAM_DATA_SIZE;
#endif
        ESP_LOGI(TAG, "Code start at 0x%08"PRIx32", total %"PRIu32", data start at 0x%08"PRIx32", total %"PRIu32" Bytes",
                 code_start, code_size, data_start, data_size);
        uint32_t tagmem_sz = cache_tagmem_retention_setup(code_start, code_size, data_start, data_size);
        void *buf = heap_caps_aligned_calloc(SOC_RTC_CNTL_TAGMEM_PD_DMA_ADDR_ALIGN, 1,
                                             tagmem_sz + RTC_HAL_DMA_LINK_NODE_SIZE,
                                             MALLOC_CAP_RETENTION);
        if (buf) {
            /* first node of the buffer holds the DMA link descriptor */
            s_cpu_retention.retent.tagmem.link_addr = rtc_cntl_hal_dma_link_init(buf,
                                                      buf + RTC_HAL_DMA_LINK_NODE_SIZE, tagmem_sz, NULL);
        } else {
            /* allocation failed: ensure tagmem retention stays disabled */
            s_cpu_retention.retent.tagmem.icache.enable = 0;
            s_cpu_retention.retent.tagmem.dcache.enable = 0;
            s_cpu_retention.retent.tagmem.link_addr = NULL;
            return ESP_ERR_NO_MEM;
        }
    }
#else // CONFIG_PM_RESTORE_CACHE_TAGMEM_AFTER_LIGHT_SLEEP
    /* feature disabled in sdkconfig: leave tagmem retention off */
    s_cpu_retention.retent.tagmem.icache.enable = 0;
    s_cpu_retention.retent.tagmem.dcache.enable = 0;
    s_cpu_retention.retent.tagmem.link_addr = NULL;
#endif // CONFIG_PM_RESTORE_CACHE_TAGMEM_AFTER_LIGHT_SLEEP
    return ESP_OK;
}
  175. static esp_err_t esp_sleep_tagmem_pd_low_deinit(void)
  176. {
  177. #if SOC_PM_SUPPORT_TAGMEM_PD
  178. if (s_cpu_retention.retent.tagmem.link_addr) {
  179. heap_caps_free(s_cpu_retention.retent.tagmem.link_addr);
  180. s_cpu_retention.retent.tagmem.icache.enable = 0;
  181. s_cpu_retention.retent.tagmem.dcache.enable = 0;
  182. s_cpu_retention.retent.tagmem.link_addr = NULL;
  183. }
  184. #endif
  185. return ESP_OK;
  186. }
  187. #endif // SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
  188. #if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
/**
 * Allocate the RTC-DMA retention buffer for the CPU power domain and
 * (optionally) the cache tag memory, and build the DMA link lists.
 *
 * @return ESP_OK on success, ESP_ERR_NO_MEM if a retention buffer cannot
 *         be allocated.
 */
esp_err_t esp_sleep_cpu_pd_low_init(void)
{
    if (s_cpu_retention.retent.cpu_pd_mem == NULL) {
        void *buf = heap_caps_aligned_calloc(SOC_RTC_CNTL_CPU_PD_DMA_ADDR_ALIGN, 1,
                                             SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE + RTC_HAL_DMA_LINK_NODE_SIZE,
                                             MALLOC_CAP_RETENTION);
        if (buf) {
            s_cpu_retention.retent.cpu_pd_mem = rtc_cntl_hal_dma_link_init(buf,
                                                buf + RTC_HAL_DMA_LINK_NODE_SIZE, SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE, NULL);
        } else {
            return ESP_ERR_NO_MEM;
        }
    }
#if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    if (esp_sleep_tagmem_pd_low_init() != ESP_OK) {
        /* Tagmem retention failure is fatal only for the 16 KB d-cache
         * configuration (where d-cache tagmem retention is mandatory);
         * otherwise CPU retention still works and the error is ignored. */
#ifdef CONFIG_ESP32S3_DATA_CACHE_16KB
        esp_sleep_cpu_pd_low_deinit();
        return ESP_ERR_NO_MEM;
#endif
    }
#endif
    return ESP_OK;
}
  212. esp_err_t esp_sleep_cpu_pd_low_deinit(void)
  213. {
  214. if (s_cpu_retention.retent.cpu_pd_mem) {
  215. heap_caps_free(s_cpu_retention.retent.cpu_pd_mem);
  216. s_cpu_retention.retent.cpu_pd_mem = NULL;
  217. }
  218. #if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
  219. if (esp_sleep_tagmem_pd_low_deinit() != ESP_OK) {
  220. #ifdef CONFIG_ESP32S3_DATA_CACHE_16KB
  221. esp_sleep_cpu_pd_low_deinit();
  222. return ESP_ERR_NO_MEM;
  223. #endif
  224. }
  225. #endif
  226. return ESP_OK;
  227. }
/* Arm CPU (and, when supported, cache tagmem) retention via the RTC control
 * HAL before entering light sleep. */
void sleep_enable_cpu_retention(void)
{
    rtc_cntl_hal_enable_cpu_retention(&s_cpu_retention.retent);
#if SOC_PM_SUPPORT_TAGMEM_PD
    rtc_cntl_hal_enable_tagmem_retention(&s_cpu_retention.retent);
#endif
}
/* Disarm CPU (and cache tagmem) retention after wakeup. Placed in IRAM
 * because it runs while the system is still exiting sleep. */
void IRAM_ATTR sleep_disable_cpu_retention(void)
{
    rtc_cntl_hal_disable_cpu_retention(&s_cpu_retention.retent);
#if SOC_PM_SUPPORT_TAGMEM_PD
    rtc_cntl_hal_disable_tagmem_retention(&s_cpu_retention.retent);
#endif
}
  242. #endif
  243. #if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
/* Espressif custom (non-standard) CSR addresses — machine mode.
 * PCER/PCMR/PCCR are the custom performance-counter CSRs. */
#define CUSTOM_CSR_PCER_MACHINE         0x7e0
#define CUSTOM_CSR_PCMR_MACHINE         0x7e1
#define CUSTOM_CSR_PCCR_MACHINE         0x7e2
#define CUSTOM_CSR_CPU_TESTBUS_CTRL     0x7e3
/* Custom CSR addresses — user mode (performance counters and fast GPIO) */
#define CUSTOM_CSR_PCER_USER            0x800
#define CUSTOM_CSR_PCMR_USER            0x801
#define CUSTOM_CSR_PCCR_USER            0x802
#define CUSTOM_CSR_GPIO_OEN_USER        0x803
#define CUSTOM_CSR_GPIO_IN_USER         0x804
#define CUSTOM_CSR_GPIO_OUT_USER        0x805
/* Custom co-processor related CSRs (saved/restored elsewhere if used) */
#define CUSTOM_CSR_CO_EXCEPTION_CAUSE   0x7f0
#define CUSTOM_CSR_CO_HWLP              0x7f1
#define CUSTOM_CSR_CO_AIA               0x7f2

/* Critical register frame pointer consumed by the low-level save/restore
 * routines (defined outside this file). */
extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame;
  258. static void * cpu_domain_dev_sleep_frame_alloc_and_init(const cpu_domain_dev_regs_region_t *regions, const int region_num)
  259. {
  260. const int region_sz = sizeof(cpu_domain_dev_regs_region_t) * region_num;
  261. int regs_frame_sz = 0;
  262. for (int num = 0; num < region_num; num++) {
  263. regs_frame_sz += regions[num].end - regions[num].start;
  264. }
  265. void *frame = heap_caps_malloc(sizeof(cpu_domain_dev_sleep_frame_t) + region_sz + regs_frame_sz, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
  266. if (frame) {
  267. cpu_domain_dev_regs_region_t *region = (cpu_domain_dev_regs_region_t *)(frame + sizeof(cpu_domain_dev_sleep_frame_t));
  268. memcpy(region, regions, region_num * sizeof(cpu_domain_dev_regs_region_t));
  269. void *regs_frame = frame + sizeof(cpu_domain_dev_sleep_frame_t) + region_sz;
  270. memset(regs_frame, 0, regs_frame_sz);
  271. *(cpu_domain_dev_sleep_frame_t *)frame = (cpu_domain_dev_sleep_frame_t) {
  272. .region = region,
  273. .region_num = region_num,
  274. .regs_frame = (uint32_t *)regs_frame
  275. };
  276. }
  277. return frame;
  278. }
  279. static inline void * cpu_domain_intpri_sleep_frame_alloc_and_init(void)
  280. {
  281. const static cpu_domain_dev_regs_region_t regions[] = {
  282. { .start = INTPRI_CORE0_CPU_INT_ENABLE_REG, .end = INTPRI_RND_ECO_LOW_REG + 4 },
  283. { .start = INTPRI_RND_ECO_HIGH_REG, .end = INTPRI_RND_ECO_HIGH_REG + 4 }
  284. };
  285. return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
  286. }
  287. static inline void * cpu_domain_cache_config_sleep_frame_alloc_and_init(void)
  288. {
  289. const static cpu_domain_dev_regs_region_t regions[] = {
  290. #if CONFIG_IDF_TARGET_ESP32C6
  291. { .start = EXTMEM_L1_CACHE_CTRL_REG, .end = EXTMEM_L1_CACHE_CTRL_REG + 4 },
  292. { .start = EXTMEM_L1_CACHE_WRAP_AROUND_CTRL_REG, .end = EXTMEM_L1_CACHE_WRAP_AROUND_CTRL_REG + 4 }
  293. #elif CONFIG_IDF_TARGET_ESP32H2
  294. { .start = CACHE_L1_CACHE_CTRL_REG, .end = CACHE_L1_CACHE_CTRL_REG + 4 },
  295. { .start = CACHE_L1_CACHE_WRAP_AROUND_CTRL_REG, .end = CACHE_L1_CACHE_WRAP_AROUND_CTRL_REG + 4 }
  296. #endif
  297. };
  298. return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
  299. }
  300. static inline void * cpu_domain_plic_sleep_frame_alloc_and_init(void)
  301. {
  302. const static cpu_domain_dev_regs_region_t regions[] = {
  303. { .start = PLIC_MXINT_ENABLE_REG, .end = PLIC_MXINT_CLAIM_REG + 4 },
  304. { .start = PLIC_MXINT_CONF_REG, .end = PLIC_MXINT_CONF_REG + 4 },
  305. { .start = PLIC_UXINT_ENABLE_REG, .end = PLIC_UXINT_CLAIM_REG + 4 },
  306. { .start = PLIC_UXINT_CONF_REG, .end = PLIC_UXINT_CONF_REG + 4 }
  307. };
  308. return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
  309. }
  310. static inline void * cpu_domain_clint_sleep_frame_alloc_and_init(void)
  311. {
  312. const static cpu_domain_dev_regs_region_t regions[] = {
  313. { .start = CLINT_MINT_SIP_REG, .end = CLINT_MINT_MTIMECMP_H_REG + 4 },
  314. { .start = CLINT_UINT_SIP_REG, .end = CLINT_UINT_UTIMECMP_H_REG + 4 }
  315. };
  316. return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
  317. }
/**
 * Allocate every frame needed for software CPU retention: the critical and
 * non-critical core register frames plus one device frame each for INTPRI,
 * cache config, PLIC and CLINT.
 *
 * Each allocation is skipped if the frame already exists, so the function
 * is safe to call repeatedly. On any failure, all frames allocated so far
 * are released via esp_sleep_cpu_retention_deinit().
 *
 * @return ESP_OK on success, ESP_ERR_NO_MEM if any allocation fails
 */
static esp_err_t esp_sleep_cpu_retention_init_impl(void)
{
    if (s_cpu_retention.retent.critical_frame == NULL) {
        void *frame = heap_caps_calloc(1, RV_SLEEP_CTX_FRMSZ, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.critical_frame = (RvCoreCriticalSleepFrame *)frame;
        /* publish the frame to the low-level save/restore code */
        rv_core_critical_regs_frame = (RvCoreCriticalSleepFrame *)frame;
    }
    if (s_cpu_retention.retent.non_critical_frame == NULL) {
        void *frame = heap_caps_calloc(1, sizeof(RvCoreNonCriticalSleepFrame), MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.non_critical_frame = (RvCoreNonCriticalSleepFrame *)frame;
    }
    if (s_cpu_retention.retent.intpri_frame == NULL) {
        void *frame = cpu_domain_intpri_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.intpri_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    if (s_cpu_retention.retent.cache_config_frame == NULL) {
        void *frame = cpu_domain_cache_config_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.cache_config_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    if (s_cpu_retention.retent.plic_frame == NULL) {
        void *frame = cpu_domain_plic_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.plic_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    if (s_cpu_retention.retent.clint_frame == NULL) {
        void *frame = cpu_domain_clint_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.clint_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    return ESP_OK;
err:
    /* roll back partial allocations */
    esp_sleep_cpu_retention_deinit();
    return ESP_ERR_NO_MEM;
}
  368. static esp_err_t esp_sleep_cpu_retention_deinit_impl(void)
  369. {
  370. if (s_cpu_retention.retent.critical_frame) {
  371. heap_caps_free((void *)s_cpu_retention.retent.critical_frame);
  372. s_cpu_retention.retent.critical_frame = NULL;
  373. rv_core_critical_regs_frame = NULL;
  374. }
  375. if (s_cpu_retention.retent.non_critical_frame) {
  376. heap_caps_free((void *)s_cpu_retention.retent.non_critical_frame);
  377. s_cpu_retention.retent.non_critical_frame = NULL;
  378. }
  379. if (s_cpu_retention.retent.intpri_frame) {
  380. heap_caps_free((void *)s_cpu_retention.retent.intpri_frame);
  381. s_cpu_retention.retent.intpri_frame = NULL;
  382. }
  383. if (s_cpu_retention.retent.cache_config_frame) {
  384. heap_caps_free((void *)s_cpu_retention.retent.cache_config_frame);
  385. s_cpu_retention.retent.cache_config_frame = NULL;
  386. }
  387. if (s_cpu_retention.retent.plic_frame) {
  388. heap_caps_free((void *)s_cpu_retention.retent.plic_frame);
  389. s_cpu_retention.retent.plic_frame = NULL;
  390. }
  391. if (s_cpu_retention.retent.clint_frame) {
  392. heap_caps_free((void *)s_cpu_retention.retent.clint_frame);
  393. s_cpu_retention.retent.clint_frame = NULL;
  394. }
  395. return ESP_OK;
  396. }
/**
 * Read mstatus and clear its MIE bit (0x8), disabling machine-level global
 * interrupts atomically via csrci.
 *
 * @return the mstatus value from before interrupts were disabled, to be
 *         passed to restore_mstatus() after wakeup
 */
static inline IRAM_ATTR uint32_t save_mstatus_and_disable_global_int(void)
{
    uint32_t mstatus;
    __asm__ __volatile__ (
        "csrr %0, mstatus\n"
        "csrci mstatus, 0x8\n"
        : "=r"(mstatus)
    );
    return mstatus;
}
/* Write back an mstatus value previously captured by
 * save_mstatus_and_disable_global_int(), re-enabling interrupts if the
 * saved value had MIE set. */
static inline IRAM_ATTR void restore_mstatus(uint32_t mstatus)
{
    __asm__ __volatile__ ("csrw mstatus, %0\n" :: "r"(mstatus));
}
/**
 * Save all "non-critical" RISC-V CSRs into the retention frame: machine
 * scratch/config CSRs, trigger CSRs, PMP (and PMA when present) protection
 * CSRs, user-mode trap CSRs, and the Espressif custom CSRs.
 *
 * These are the registers that can be captured from C before the final
 * critical-context save performed by rv_core_critical_regs_save().
 *
 * @return pointer to the filled non-critical sleep frame
 */
static IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void)
{
    assert(s_cpu_retention.retent.non_critical_frame);
    RvCoreNonCriticalSleepFrame *frame = s_cpu_retention.retent.non_critical_frame;
    /* machine-mode scratch / delegation / ISA CSRs */
    frame->mscratch = RV_READ_CSR(mscratch);
    frame->mideleg = RV_READ_CSR(mideleg);
    frame->misa = RV_READ_CSR(misa);
    /* trigger (debug) CSRs */
    frame->tselect = RV_READ_CSR(tselect);
    frame->tdata1 = RV_READ_CSR(tdata1);
    frame->tdata2 = RV_READ_CSR(tdata2);
    frame->tcontrol = RV_READ_CSR(tcontrol);
    /* physical memory protection: 16 address CSRs + 4 config CSRs */
    frame->pmpaddr0 = RV_READ_CSR(pmpaddr0);
    frame->pmpaddr1 = RV_READ_CSR(pmpaddr1);
    frame->pmpaddr2 = RV_READ_CSR(pmpaddr2);
    frame->pmpaddr3 = RV_READ_CSR(pmpaddr3);
    frame->pmpaddr4 = RV_READ_CSR(pmpaddr4);
    frame->pmpaddr5 = RV_READ_CSR(pmpaddr5);
    frame->pmpaddr6 = RV_READ_CSR(pmpaddr6);
    frame->pmpaddr7 = RV_READ_CSR(pmpaddr7);
    frame->pmpaddr8 = RV_READ_CSR(pmpaddr8);
    frame->pmpaddr9 = RV_READ_CSR(pmpaddr9);
    frame->pmpaddr10 = RV_READ_CSR(pmpaddr10);
    frame->pmpaddr11 = RV_READ_CSR(pmpaddr11);
    frame->pmpaddr12 = RV_READ_CSR(pmpaddr12);
    frame->pmpaddr13 = RV_READ_CSR(pmpaddr13);
    frame->pmpaddr14 = RV_READ_CSR(pmpaddr14);
    frame->pmpaddr15 = RV_READ_CSR(pmpaddr15);
    frame->pmpcfg0 = RV_READ_CSR(pmpcfg0);
    frame->pmpcfg1 = RV_READ_CSR(pmpcfg1);
    frame->pmpcfg2 = RV_READ_CSR(pmpcfg2);
    frame->pmpcfg3 = RV_READ_CSR(pmpcfg3);
#if SOC_CPU_HAS_PMA
    /* physical memory attribute CSRs (Espressif extension) */
    frame->pmaaddr0 = RV_READ_CSR(CSR_PMAADDR(0));
    frame->pmaaddr1 = RV_READ_CSR(CSR_PMAADDR(1));
    frame->pmaaddr2 = RV_READ_CSR(CSR_PMAADDR(2));
    frame->pmaaddr3 = RV_READ_CSR(CSR_PMAADDR(3));
    frame->pmaaddr4 = RV_READ_CSR(CSR_PMAADDR(4));
    frame->pmaaddr5 = RV_READ_CSR(CSR_PMAADDR(5));
    frame->pmaaddr6 = RV_READ_CSR(CSR_PMAADDR(6));
    frame->pmaaddr7 = RV_READ_CSR(CSR_PMAADDR(7));
    frame->pmaaddr8 = RV_READ_CSR(CSR_PMAADDR(8));
    frame->pmaaddr9 = RV_READ_CSR(CSR_PMAADDR(9));
    frame->pmaaddr10 = RV_READ_CSR(CSR_PMAADDR(10));
    frame->pmaaddr11 = RV_READ_CSR(CSR_PMAADDR(11));
    frame->pmaaddr12 = RV_READ_CSR(CSR_PMAADDR(12));
    frame->pmaaddr13 = RV_READ_CSR(CSR_PMAADDR(13));
    frame->pmaaddr14 = RV_READ_CSR(CSR_PMAADDR(14));
    frame->pmaaddr15 = RV_READ_CSR(CSR_PMAADDR(15));
    frame->pmacfg0 = RV_READ_CSR(CSR_PMACFG(0));
    frame->pmacfg1 = RV_READ_CSR(CSR_PMACFG(1));
    frame->pmacfg2 = RV_READ_CSR(CSR_PMACFG(2));
    frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(3));
    frame->pmacfg4 = RV_READ_CSR(CSR_PMACFG(4));
    frame->pmacfg5 = RV_READ_CSR(CSR_PMACFG(5));
    frame->pmacfg6 = RV_READ_CSR(CSR_PMACFG(6));
    frame->pmacfg7 = RV_READ_CSR(CSR_PMACFG(7));
    frame->pmacfg8 = RV_READ_CSR(CSR_PMACFG(8));
    frame->pmacfg9 = RV_READ_CSR(CSR_PMACFG(9));
    frame->pmacfg10 = RV_READ_CSR(CSR_PMACFG(10));
    frame->pmacfg11 = RV_READ_CSR(CSR_PMACFG(11));
    frame->pmacfg12 = RV_READ_CSR(CSR_PMACFG(12));
    frame->pmacfg13 = RV_READ_CSR(CSR_PMACFG(13));
    frame->pmacfg14 = RV_READ_CSR(CSR_PMACFG(14));
    frame->pmacfg15 = RV_READ_CSR(CSR_PMACFG(15));
#endif // SOC_CPU_HAS_PMA
    /* user-mode trap CSRs */
    frame->utvec = RV_READ_CSR(utvec);
    frame->ustatus = RV_READ_CSR(ustatus);
    frame->uepc = RV_READ_CSR(uepc);
    frame->ucause = RV_READ_CSR(ucause);
    /* Espressif custom CSRs: performance counters, testbus, fast GPIO */
    frame->mpcer = RV_READ_CSR(CUSTOM_CSR_PCER_MACHINE);
    frame->mpcmr = RV_READ_CSR(CUSTOM_CSR_PCMR_MACHINE);
    frame->mpccr = RV_READ_CSR(CUSTOM_CSR_PCCR_MACHINE);
    frame->cpu_testbus_ctrl = RV_READ_CSR(CUSTOM_CSR_CPU_TESTBUS_CTRL);
    frame->upcer = RV_READ_CSR(CUSTOM_CSR_PCER_USER);
    frame->upcmr = RV_READ_CSR(CUSTOM_CSR_PCMR_USER);
    frame->upccr = RV_READ_CSR(CUSTOM_CSR_PCCR_USER);
    frame->ugpio_oen = RV_READ_CSR(CUSTOM_CSR_GPIO_OEN_USER);
    frame->ugpio_in = RV_READ_CSR(CUSTOM_CSR_GPIO_IN_USER);
    frame->ugpio_out = RV_READ_CSR(CUSTOM_CSR_GPIO_OUT_USER);
    return frame;
}
/**
 * Restore all non-critical RISC-V CSRs from the retention frame; exact
 * inverse of rv_core_noncritical_regs_save().
 *
 * @param frame frame previously filled by rv_core_noncritical_regs_save()
 */
static IRAM_ATTR void rv_core_noncritical_regs_restore(RvCoreNonCriticalSleepFrame *frame)
{
    assert(frame);
    /* machine-mode scratch / delegation / ISA CSRs */
    RV_WRITE_CSR(mscratch, frame->mscratch);
    RV_WRITE_CSR(mideleg, frame->mideleg);
    RV_WRITE_CSR(misa, frame->misa);
    /* trigger (debug) CSRs */
    RV_WRITE_CSR(tselect, frame->tselect);
    RV_WRITE_CSR(tdata1, frame->tdata1);
    RV_WRITE_CSR(tdata2, frame->tdata2);
    RV_WRITE_CSR(tcontrol, frame->tcontrol);
    /* physical memory protection CSRs */
    RV_WRITE_CSR(pmpaddr0, frame->pmpaddr0);
    RV_WRITE_CSR(pmpaddr1, frame->pmpaddr1);
    RV_WRITE_CSR(pmpaddr2, frame->pmpaddr2);
    RV_WRITE_CSR(pmpaddr3, frame->pmpaddr3);
    RV_WRITE_CSR(pmpaddr4, frame->pmpaddr4);
    RV_WRITE_CSR(pmpaddr5, frame->pmpaddr5);
    RV_WRITE_CSR(pmpaddr6, frame->pmpaddr6);
    RV_WRITE_CSR(pmpaddr7, frame->pmpaddr7);
    RV_WRITE_CSR(pmpaddr8, frame->pmpaddr8);
    RV_WRITE_CSR(pmpaddr9, frame->pmpaddr9);
    RV_WRITE_CSR(pmpaddr10,frame->pmpaddr10);
    RV_WRITE_CSR(pmpaddr11,frame->pmpaddr11);
    RV_WRITE_CSR(pmpaddr12,frame->pmpaddr12);
    RV_WRITE_CSR(pmpaddr13,frame->pmpaddr13);
    RV_WRITE_CSR(pmpaddr14,frame->pmpaddr14);
    RV_WRITE_CSR(pmpaddr15,frame->pmpaddr15);
    RV_WRITE_CSR(pmpcfg0, frame->pmpcfg0);
    RV_WRITE_CSR(pmpcfg1, frame->pmpcfg1);
    RV_WRITE_CSR(pmpcfg2, frame->pmpcfg2);
    RV_WRITE_CSR(pmpcfg3, frame->pmpcfg3);
#if SOC_CPU_HAS_PMA
    /* physical memory attribute CSRs (Espressif extension) */
    RV_WRITE_CSR(CSR_PMAADDR(0), frame->pmaaddr0);
    RV_WRITE_CSR(CSR_PMAADDR(1), frame->pmaaddr1);
    RV_WRITE_CSR(CSR_PMAADDR(2), frame->pmaaddr2);
    RV_WRITE_CSR(CSR_PMAADDR(3), frame->pmaaddr3);
    RV_WRITE_CSR(CSR_PMAADDR(4), frame->pmaaddr4);
    RV_WRITE_CSR(CSR_PMAADDR(5), frame->pmaaddr5);
    RV_WRITE_CSR(CSR_PMAADDR(6), frame->pmaaddr6);
    RV_WRITE_CSR(CSR_PMAADDR(7), frame->pmaaddr7);
    RV_WRITE_CSR(CSR_PMAADDR(8), frame->pmaaddr8);
    RV_WRITE_CSR(CSR_PMAADDR(9), frame->pmaaddr9);
    RV_WRITE_CSR(CSR_PMAADDR(10),frame->pmaaddr10);
    RV_WRITE_CSR(CSR_PMAADDR(11),frame->pmaaddr11);
    RV_WRITE_CSR(CSR_PMAADDR(12),frame->pmaaddr12);
    RV_WRITE_CSR(CSR_PMAADDR(13),frame->pmaaddr13);
    RV_WRITE_CSR(CSR_PMAADDR(14),frame->pmaaddr14);
    RV_WRITE_CSR(CSR_PMAADDR(15),frame->pmaaddr15);
    RV_WRITE_CSR(CSR_PMACFG(0), frame->pmacfg0);
    RV_WRITE_CSR(CSR_PMACFG(1), frame->pmacfg1);
    RV_WRITE_CSR(CSR_PMACFG(2), frame->pmacfg2);
    RV_WRITE_CSR(CSR_PMACFG(3), frame->pmacfg3);
    RV_WRITE_CSR(CSR_PMACFG(4), frame->pmacfg4);
    RV_WRITE_CSR(CSR_PMACFG(5), frame->pmacfg5);
    RV_WRITE_CSR(CSR_PMACFG(6), frame->pmacfg6);
    RV_WRITE_CSR(CSR_PMACFG(7), frame->pmacfg7);
    RV_WRITE_CSR(CSR_PMACFG(8), frame->pmacfg8);
    RV_WRITE_CSR(CSR_PMACFG(9), frame->pmacfg9);
    RV_WRITE_CSR(CSR_PMACFG(10), frame->pmacfg10);
    RV_WRITE_CSR(CSR_PMACFG(11), frame->pmacfg11);
    RV_WRITE_CSR(CSR_PMACFG(12), frame->pmacfg12);
    RV_WRITE_CSR(CSR_PMACFG(13), frame->pmacfg13);
    RV_WRITE_CSR(CSR_PMACFG(14), frame->pmacfg14);
    RV_WRITE_CSR(CSR_PMACFG(15), frame->pmacfg15);
#endif //SOC_CPU_HAS_PMA
    /* user-mode trap CSRs */
    RV_WRITE_CSR(utvec, frame->utvec);
    RV_WRITE_CSR(ustatus, frame->ustatus);
    RV_WRITE_CSR(uepc, frame->uepc);
    RV_WRITE_CSR(ucause, frame->ucause);
    /* Espressif custom CSRs */
    RV_WRITE_CSR(CUSTOM_CSR_PCER_MACHINE, frame->mpcer);
    RV_WRITE_CSR(CUSTOM_CSR_PCMR_MACHINE, frame->mpcmr);
    RV_WRITE_CSR(CUSTOM_CSR_PCCR_MACHINE, frame->mpccr);
    RV_WRITE_CSR(CUSTOM_CSR_CPU_TESTBUS_CTRL, frame->cpu_testbus_ctrl);
    RV_WRITE_CSR(CUSTOM_CSR_PCER_USER, frame->upcer);
    RV_WRITE_CSR(CUSTOM_CSR_PCMR_USER, frame->upcmr);
    RV_WRITE_CSR(CUSTOM_CSR_PCCR_USER, frame->upccr);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_OEN_USER,frame->ugpio_oen);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_IN_USER, frame->ugpio_in);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_OUT_USER,frame->ugpio_out);
}
  571. static IRAM_ATTR void cpu_domain_dev_regs_save(cpu_domain_dev_sleep_frame_t *frame)
  572. {
  573. assert(frame);
  574. cpu_domain_dev_regs_region_t *region = frame->region;
  575. uint32_t *regs_frame = frame->regs_frame;
  576. int offset = 0;
  577. for (int i = 0; i < frame->region_num; i++) {
  578. for (uint32_t addr = region[i].start; addr < region[i].end; addr+=4) {
  579. regs_frame[offset++] = *(uint32_t *)addr;
  580. }
  581. }
  582. }
  583. static IRAM_ATTR void cpu_domain_dev_regs_restore(cpu_domain_dev_sleep_frame_t *frame)
  584. {
  585. assert(frame);
  586. cpu_domain_dev_regs_region_t *region = frame->region;
  587. uint32_t *regs_frame = frame->regs_frame;
  588. int offset = 0;
  589. for (int i = 0; i < frame->region_num; i++) {
  590. for (uint32_t addr = region[i].start; addr < region[i].end; addr+=4) {
  591. *(uint32_t *)addr = regs_frame[offset++];
  592. }
  593. }
  594. }
  595. #if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
  596. static IRAM_ATTR void update_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr)
  597. {
  598. *(frame_crc_ptr) = esp_crc32_le(0, (void *)frame_ptr, frame_check_size);
  599. }
/**
 * Verify the CRC32 stamped by update_retention_frame_crc(). On mismatch the
 * retention frame is corrupted and the system cannot safely resume: resume
 * the UARTs so the error is visible, log it, and restart without running
 * the normal shutdown path.
 */
static IRAM_ATTR void validate_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr)
{
    if(*(frame_crc_ptr) != esp_crc32_le(0, (void *)(frame_ptr), frame_check_size)){
        // resume uarts
        for (int i = 0; i < SOC_UART_NUM; ++i) {
#ifndef CONFIG_IDF_TARGET_ESP32
            if (!uart_ll_is_enabled(i)) {
                continue;
            }
#endif
            uart_ll_force_xon(i);
        }
        /* Since it is still in the critical now, use ESP_EARLY_LOG */
        ESP_EARLY_LOGE(TAG, "Sleep retention frame is corrupted");
        esp_restart_noos();
    }
}
  617. #endif
/* Low-level critical context save/restore routines (defined outside this
 * file); restore is also installed as the light-sleep wake stub. */
extern RvCoreCriticalSleepFrame * rv_core_critical_regs_save(void);
extern RvCoreCriticalSleepFrame * rv_core_critical_regs_restore(void);
/* Signature of the function that actually enters sleep (e.g. a pmu sleep
 * start routine): (wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp) */
typedef uint32_t (* sleep_cpu_entry_cb_t)(uint32_t, uint32_t, uint32_t, bool);
/**
 * Save the critical core context and enter sleep.
 *
 * NOTE(review): this function effectively returns twice. On the sleep-entry
 * pass, rv_core_critical_regs_save() leaves (frame->pmufunc & 0x3) == 0x1,
 * so the wake stub register is pointed at rv_core_critical_regs_restore and
 * `goto_sleep` is invoked. After wakeup the restored context resumes here
 * with a different pmufunc value, the saved frame's CRC is validated (if
 * enabled) and pmu_sleep_finish() is returned.
 */
static IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
        uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
    RvCoreCriticalSleepFrame * frame = rv_core_critical_regs_save();
    if ((frame->pmufunc & 0x3) == 0x1) {
        esp_sleep_execute_event_callbacks(SLEEP_EVENT_SW_CPU_TO_MEM_END, (void *)0);
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
        /* Minus 2 * sizeof(long) is for bypass `pmufunc` and `frame_crc` field */
        update_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif
        /* resume from sleep via the critical-register restore routine */
        REG_WRITE(LIGHT_SLEEP_WAKE_STUB_ADDR_REG, (uint32_t)rv_core_critical_regs_restore);
        return (*goto_sleep)(wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);
    }
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    else {
        validate_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc));
    }
#endif
    return pmu_sleep_finish();
}
/**
 * Perform software CPU retention around a sleep entry.
 *
 * Saves (in order) the PLIC, CLINT, INTPRI and cache-config device frames
 * and the non-critical CSR frame with interrupts globally disabled, enters
 * sleep via do_cpu_retention(), then restores everything in the reverse
 * order after wakeup.
 *
 * @param goto_sleep        function that actually enters sleep
 * @param wakeup_opt        wakeup options forwarded to `goto_sleep`
 * @param reject_opt        sleep reject options forwarded to `goto_sleep`
 * @param lslp_mem_inf_fpu  forwarded to `goto_sleep`
 * @param dslp              true for deep sleep, false for light sleep
 * @return result of the sleep attempt as reported by do_cpu_retention()
 */
esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uint32_t, uint32_t, bool),
        uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
    esp_sleep_execute_event_callbacks(SLEEP_EVENT_SW_CPU_TO_MEM_START, (void *)0);
    uint32_t mstatus = save_mstatus_and_disable_global_int();
    cpu_domain_dev_regs_save(s_cpu_retention.retent.plic_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.clint_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.intpri_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.cache_config_frame);
    RvCoreNonCriticalSleepFrame *frame = rv_core_noncritical_regs_save();
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    /* Minus sizeof(long) is for bypass `frame_crc` field */
    update_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif
    esp_err_t err = do_cpu_retention(goto_sleep, wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    validate_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif
    /* restore in reverse order of the saves above */
    rv_core_noncritical_regs_restore(frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.cache_config_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.intpri_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.clint_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.plic_frame);
    restore_mstatus(mstatus);
    return err;
}
  667. #endif // SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
  668. #if SOC_PM_SUPPORT_CPU_PD
  669. esp_err_t esp_sleep_cpu_retention_init(void)
  670. {
  671. esp_err_t err = ESP_OK;
  672. #if SOC_PM_CPU_RETENTION_BY_RTCCNTL
  673. err = esp_sleep_cpu_pd_low_init();
  674. #elif SOC_PM_CPU_RETENTION_BY_SW
  675. err = esp_sleep_cpu_retention_init_impl();
  676. #endif
  677. return err;
  678. }
  679. esp_err_t esp_sleep_cpu_retention_deinit(void)
  680. {
  681. esp_err_t err = ESP_OK;
  682. #if SOC_PM_CPU_RETENTION_BY_RTCCNTL
  683. err = esp_sleep_cpu_pd_low_deinit();
  684. #elif SOC_PM_CPU_RETENTION_BY_SW
  685. err = esp_sleep_cpu_retention_deinit_impl();
  686. #endif
  687. return err;
  688. }
  689. bool cpu_domain_pd_allowed(void)
  690. {
  691. #if SOC_PM_CPU_RETENTION_BY_RTCCNTL
  692. return (s_cpu_retention.retent.cpu_pd_mem != NULL);
  693. #elif SOC_PM_CPU_RETENTION_BY_SW
  694. return (s_cpu_retention.retent.critical_frame != NULL) && \
  695. (s_cpu_retention.retent.non_critical_frame != NULL) && \
  696. (s_cpu_retention.retent.intpri_frame != NULL) && \
  697. (s_cpu_retention.retent.cache_config_frame != NULL) && \
  698. (s_cpu_retention.retent.plic_frame != NULL) && \
  699. (s_cpu_retention.retent.clint_frame != NULL);
  700. #else
  701. return false;
  702. #endif
  703. }
  704. esp_err_t sleep_cpu_configure(bool light_sleep_enable)
  705. {
  706. #if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP
  707. if (light_sleep_enable) {
  708. ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_init(), TAG, "Failed to enable CPU power down during light sleep.");
  709. } else {
  710. ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_deinit(), TAG, "Failed to release CPU retention memory");
  711. }
  712. #endif
  713. return ESP_OK;
  714. }
  715. #endif