sleep_cpu.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791
  1. /*
  2. * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <stddef.h>
  7. #include <string.h>
  8. #include <inttypes.h>
  9. #include <sys/lock.h>
  10. #include <sys/param.h>
  11. #include "esp_attr.h"
  12. #include "esp_check.h"
  13. #include "esp_sleep.h"
  14. #include "esp_log.h"
  15. #include "esp_crc.h"
  16. #include "freertos/FreeRTOS.h"
  17. #include "freertos/task.h"
  18. #include "esp_heap_caps.h"
  19. #include "soc/soc_caps.h"
  20. #include "esp_private/sleep_cpu.h"
  21. #include "sdkconfig.h"
  22. #if SOC_PMU_SUPPORTED
  23. #include "esp_private/esp_pmu.h"
  24. #else
  25. #include "hal/rtc_hal.h"
  26. #endif
  27. #if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
  28. #include "esp_private/system_internal.h"
  29. #include "hal/clk_gate_ll.h"
  30. #include "hal/uart_hal.h"
  31. #endif
  32. #include "soc/rtc_periph.h"
  33. #ifdef CONFIG_IDF_TARGET_ESP32S3
  34. #include "esp32s3/rom/cache.h"
  35. #elif CONFIG_IDF_TARGET_ESP32C6
  36. #include "esp32c6/rom/rtc.h"
  37. #include "riscv/rvsleep-frames.h"
  38. #include "soc/intpri_reg.h"
  39. #include "soc/extmem_reg.h"
  40. #include "soc/plic_reg.h"
  41. #include "soc/clint_reg.h"
  42. #include "esp32c6/rom/cache.h"
  43. #elif CONFIG_IDF_TARGET_ESP32H2
  44. #include "esp32h2/rom/rtc.h"
  45. #include "riscv/rvsleep-frames.h"
  46. #include "soc/intpri_reg.h"
  47. #include "soc/extmem_reg.h"
  48. #include "soc/plic_reg.h"
  49. #include "soc/clint_reg.h"
  50. #include "esp32h2/rom/cache.h"
  51. #endif
/* Log tag; marked unused because some configurations compile out every logging call site. */
static __attribute__((unused)) const char *TAG = "sleep";

/* One contiguous MMIO register range [start, end) to back up across CPU
 * power-down. Registers are accessed 4 bytes at a time. */
typedef struct {
    uint32_t start;
    uint32_t end;
} cpu_domain_dev_regs_region_t;

/* Backup frame for one CPU-domain peripheral: the region table plus the
 * RAM buffer (regs_frame) holding the saved register contents. */
typedef struct {
    cpu_domain_dev_regs_region_t *region;
    int region_num;
    uint32_t *regs_frame;
} cpu_domain_dev_sleep_frame_t;
/**
 * Internal structure which holds all requested light sleep cpu retention parameters
 */
typedef struct {
#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    /* Retention performed by the RTC controller's DMA engine. */
    rtc_cntl_sleep_retent_t retent;
#elif SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
    /* Retention performed by software: CPU core register frames plus one
     * backup frame per CPU-domain peripheral, heap-allocated on demand. */
    struct {
        RvCoreCriticalSleepFrame *critical_frame;         // core regs saved by the assembly routine (rv_core_critical_regs_save)
        RvCoreNonCriticalSleepFrame *non_critical_frame;  // remaining CSRs saved from C code
        cpu_domain_dev_sleep_frame_t *intpri_frame;       // interrupt priority/enable registers
        cpu_domain_dev_sleep_frame_t *cache_config_frame; // cache control registers
        cpu_domain_dev_sleep_frame_t *plic_frame;         // platform-level interrupt controller registers
        cpu_domain_dev_sleep_frame_t *clint_frame;        // core-local interrupt/timer registers
    } retent;
#endif
} sleep_cpu_retention_t;

/* Kept in DRAM (DRAM_ATTR) so it stays accessible while flash cache is
 * unavailable around the sleep entry/exit path. */
static DRAM_ATTR __attribute__((unused)) sleep_cpu_retention_t s_cpu_retention;
  80. #if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
  81. #if CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
/**
 * Compute the I/D-cache tag memory retention parameters for the code and
 * data segments described by (vaddr, size), storing the results into
 * s_cpu_retention.retent.tagmem.
 *
 * @param code_seg_vaddr start virtual address of the code segment (i-cache)
 * @param code_seg_size  code segment size in bytes
 * @param data_seg_vaddr start virtual address of the data segment (d-cache)
 * @param data_seg_size  data segment size in bytes
 * @return total number of bytes of tag memory that must be transferred by RTC DMA
 */
static uint32_t cache_tagmem_retention_setup(uint32_t code_seg_vaddr, uint32_t code_seg_size, uint32_t data_seg_vaddr, uint32_t data_seg_size)
{
    uint32_t sets;   /* i/d-cache total set counts */
    uint32_t index;  /* virtual address mapping i/d-cache row offset */
    uint32_t waysgrp;
    uint32_t icache_tagmem_blk_gs, dcache_tagmem_blk_gs;
    struct cache_mode imode = { .icache = 1 };
    struct cache_mode dmode = { .icache = 0 };

    /* calculate/prepare i-cache tag memory retention parameters */
    Cache_Get_Mode(&imode);
    sets = imode.cache_size / imode.cache_ways / imode.cache_line_size;
    index = (code_seg_vaddr / imode.cache_line_size) % sets;
    waysgrp = imode.cache_ways >> 2;  /* ways handled in groups of 4 */
    code_seg_size = ALIGNUP(imode.cache_line_size, code_seg_size);
    s_cpu_retention.retent.tagmem.icache.start_point = index;
    s_cpu_retention.retent.tagmem.icache.size = (sets * waysgrp) & 0xff;
    s_cpu_retention.retent.tagmem.icache.vld_size = s_cpu_retention.retent.tagmem.icache.size;
    /* If the code segment is smaller than one cache way, only the used part
     * of the tag memory needs to be retained. */
    if (code_seg_size < imode.cache_size / imode.cache_ways) {
        s_cpu_retention.retent.tagmem.icache.vld_size = (code_seg_size / imode.cache_line_size) * waysgrp;
    }
    s_cpu_retention.retent.tagmem.icache.enable = (code_seg_size != 0) ? 1 : 0;
    icache_tagmem_blk_gs = s_cpu_retention.retent.tagmem.icache.vld_size ? s_cpu_retention.retent.tagmem.icache.vld_size : sets * waysgrp;
    icache_tagmem_blk_gs = ALIGNUP(4, icache_tagmem_blk_gs);
    ESP_LOGD(TAG, "I-cache size:%d KiB, line size:%d B, ways:%d, sets:%d, index:%d, tag block groups:%d", (imode.cache_size>>10),
            imode.cache_line_size, imode.cache_ways, sets, index, icache_tagmem_blk_gs);

    /* calculate/prepare d-cache tag memory retention parameters */
    Cache_Get_Mode(&dmode);
    sets = dmode.cache_size / dmode.cache_ways / dmode.cache_line_size;
    index = (data_seg_vaddr / dmode.cache_line_size) % sets;
    waysgrp = dmode.cache_ways >> 2;
    data_seg_size = ALIGNUP(dmode.cache_line_size, data_seg_size);
    s_cpu_retention.retent.tagmem.dcache.start_point = index;
    s_cpu_retention.retent.tagmem.dcache.size = (sets * waysgrp) & 0x1ff;
    s_cpu_retention.retent.tagmem.dcache.vld_size = s_cpu_retention.retent.tagmem.dcache.size;
#ifndef CONFIG_ESP32S3_DATA_CACHE_16KB
    if (data_seg_size < dmode.cache_size / dmode.cache_ways) {
        s_cpu_retention.retent.tagmem.dcache.vld_size = (data_seg_size / dmode.cache_line_size) * waysgrp;
    }
    s_cpu_retention.retent.tagmem.dcache.enable = (data_seg_size != 0) ? 1 : 0;
#else
    /* With the 16 KB d-cache configuration, d-cache tagmem retention is always on. */
    s_cpu_retention.retent.tagmem.dcache.enable = 1;
#endif
    dcache_tagmem_blk_gs = s_cpu_retention.retent.tagmem.dcache.vld_size ? s_cpu_retention.retent.tagmem.dcache.vld_size : sets * waysgrp;
    dcache_tagmem_blk_gs = ALIGNUP(4, dcache_tagmem_blk_gs);
    ESP_LOGD(TAG, "D-cache size:%d KiB, line size:%d B, ways:%d, sets:%d, index:%d, tag block groups:%d", (dmode.cache_size>>10),
            dmode.cache_line_size, dmode.cache_ways, sets, index, dcache_tagmem_blk_gs);

    /* For I or D cache tagmem retention, backup and restore are performed through
     * RTC DMA (its bus width is 128 bits), For I/D Cache tagmem blocks (i-cache
     * tagmem blocks = 92 bits, d-cache tagmem blocks = 88 bits), RTC DMA automatically
     * aligns its bit width to 96 bits, therefore, 3 times RTC DMA can transfer 4
     * i/d-cache tagmem blocks (128 bits * 3 = 96 bits * 4) */
    return (((icache_tagmem_blk_gs + dcache_tagmem_blk_gs) << 2) * 3);
}
  135. #endif // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
/**
 * Allocate and initialize the RTC DMA link list used to retain the
 * I/D-cache tag memory while it is powered down during light sleep.
 * Idempotent: does nothing if the link list already exists.
 *
 * @return ESP_OK on success, or ESP_ERR_NO_MEM if the backup buffer could
 *         not be allocated (tag memory retention is then disabled).
 */
static esp_err_t esp_sleep_tagmem_pd_low_init(void)
{
#if CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
    if (s_cpu_retention.retent.tagmem.link_addr == NULL) {
        /* Retain tag entries covering the application's code and rodata. */
        extern char _stext[], _etext[];
        uint32_t code_start = (uint32_t)_stext;
        uint32_t code_size = (uint32_t)(_etext - _stext);
#if !(CONFIG_SPIRAM && CONFIG_SOC_PM_SUPPORT_TAGMEM_PD)
        extern char _rodata_start[], _rodata_reserved_end[];
        uint32_t data_start = (uint32_t)_rodata_start;
        uint32_t data_size = (uint32_t)(_rodata_reserved_end - _rodata_start);
#else
        /* With SPIRAM, cover the whole external data address range instead. */
        uint32_t data_start = SOC_DROM_LOW;
        uint32_t data_size = SOC_EXTRAM_DATA_SIZE;
#endif
        ESP_LOGI(TAG, "Code start at 0x%08"PRIx32", total %"PRIu32", data start at 0x%08"PRIx32", total %"PRIu32" Bytes",
                code_start, code_size, data_start, data_size);
        uint32_t tagmem_sz = cache_tagmem_retention_setup(code_start, code_size, data_start, data_size);
        /* One buffer holds the DMA link node followed by the tagmem backup area,
         * aligned as required by the RTC DMA engine. */
        void *buf = heap_caps_aligned_calloc(SOC_RTC_CNTL_TAGMEM_PD_DMA_ADDR_ALIGN, 1,
                                             tagmem_sz + RTC_HAL_DMA_LINK_NODE_SIZE,
                                             MALLOC_CAP_RETENTION);
        if (buf) {
            s_cpu_retention.retent.tagmem.link_addr = rtc_cntl_hal_dma_link_init(buf,
                                  buf + RTC_HAL_DMA_LINK_NODE_SIZE, tagmem_sz, NULL);
        } else {
            /* Allocation failed: turn tag memory retention off entirely. */
            s_cpu_retention.retent.tagmem.icache.enable = 0;
            s_cpu_retention.retent.tagmem.dcache.enable = 0;
            s_cpu_retention.retent.tagmem.link_addr = NULL;
            return ESP_ERR_NO_MEM;
        }
    }
#else // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
    /* Tag memory power-down not enabled in config: keep retention disabled. */
    s_cpu_retention.retent.tagmem.icache.enable = 0;
    s_cpu_retention.retent.tagmem.dcache.enable = 0;
    s_cpu_retention.retent.tagmem.link_addr = NULL;
#endif // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
    return ESP_OK;
}
  174. static esp_err_t esp_sleep_tagmem_pd_low_deinit(void)
  175. {
  176. #if SOC_PM_SUPPORT_TAGMEM_PD
  177. if (s_cpu_retention.retent.tagmem.link_addr) {
  178. heap_caps_free(s_cpu_retention.retent.tagmem.link_addr);
  179. s_cpu_retention.retent.tagmem.icache.enable = 0;
  180. s_cpu_retention.retent.tagmem.dcache.enable = 0;
  181. s_cpu_retention.retent.tagmem.link_addr = NULL;
  182. }
  183. #endif
  184. return ESP_OK;
  185. }
  186. #endif // SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
  187. #if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
/**
 * Allocate the RTC DMA backup memory for CPU power-down retention and,
 * when supported, the cache tag memory retention buffer.
 *
 * @return ESP_OK on success, ESP_ERR_NO_MEM on allocation failure.
 */
esp_err_t esp_sleep_cpu_pd_low_init(void)
{
    if (s_cpu_retention.retent.cpu_pd_mem == NULL) {
        /* One buffer holds the DMA link node followed by the CPU retention area. */
        void *buf = heap_caps_aligned_calloc(SOC_RTC_CNTL_CPU_PD_DMA_ADDR_ALIGN, 1,
                                             SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE + RTC_HAL_DMA_LINK_NODE_SIZE,
                                             MALLOC_CAP_RETENTION);
        if (buf) {
            s_cpu_retention.retent.cpu_pd_mem = rtc_cntl_hal_dma_link_init(buf,
                                  buf + RTC_HAL_DMA_LINK_NODE_SIZE, SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE, NULL);
        } else {
            return ESP_ERR_NO_MEM;
        }
    }
#if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    if (esp_sleep_tagmem_pd_low_init() != ESP_OK) {
#ifdef CONFIG_ESP32S3_DATA_CACHE_16KB
        /* With the 16 KB d-cache config tagmem retention is mandatory, so a
         * failure here rolls back the CPU retention buffer as well. */
        esp_sleep_cpu_pd_low_deinit();
        return ESP_ERR_NO_MEM;
#endif
        /* Otherwise a tagmem failure is tolerated: retention was disabled
         * inside esp_sleep_tagmem_pd_low_init(). */
    }
#endif
    return ESP_OK;
}
/**
 * Release the RTC DMA backup memory used for CPU power-down retention
 * and tear down tag memory retention.
 *
 * @return ESP_OK (the error branch below is effectively unreachable today).
 */
esp_err_t esp_sleep_cpu_pd_low_deinit(void)
{
    if (s_cpu_retention.retent.cpu_pd_mem) {
        heap_caps_free(s_cpu_retention.retent.cpu_pd_mem);
        s_cpu_retention.retent.cpu_pd_mem = NULL;
    }
#if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    if (esp_sleep_tagmem_pd_low_deinit() != ESP_OK) {
#ifdef CONFIG_ESP32S3_DATA_CACHE_16KB
        /* NOTE(review): this recursive call mirrors the init path's rollback.
         * It terminates because cpu_pd_mem was already cleared above, and the
         * branch is dead code today since esp_sleep_tagmem_pd_low_deinit()
         * always returns ESP_OK — confirm before relying on it. */
        esp_sleep_cpu_pd_low_deinit();
        return ESP_ERR_NO_MEM;
#endif
    }
#endif
    return ESP_OK;
}
/* Arm CPU retention (and, where supported, cache tagmem retention) in the
 * RTC controller ahead of entering sleep. */
void sleep_enable_cpu_retention(void)
{
    rtc_cntl_hal_enable_cpu_retention(&s_cpu_retention.retent);
#if SOC_PM_SUPPORT_TAGMEM_PD
    rtc_cntl_hal_enable_tagmem_retention(&s_cpu_retention.retent);
#endif
}
/* Disarm CPU retention (and tagmem retention) after wakeup. Placed in IRAM
 * (IRAM_ATTR) so it can run while flash cache is unavailable. */
void IRAM_ATTR sleep_disable_cpu_retention(void)
{
    rtc_cntl_hal_disable_cpu_retention(&s_cpu_retention.retent);
#if SOC_PM_SUPPORT_TAGMEM_PD
    rtc_cntl_hal_disable_tagmem_retention(&s_cpu_retention.retent);
#endif
}
  241. #endif
  242. #if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
/* Vendor-defined CSR addresses of the ESP RISC-V core that software must
 * save/restore across a CPU power-down. The PCER/PCMR/PCCR names appear to
 * be performance-counter CSRs and the GPIO_* ones fast-GPIO CSRs — semantics
 * per the core's datasheet; only the addresses are used here. */
#define CUSTOM_CSR_PCER_MACHINE         0x7e0
#define CUSTOM_CSR_PCMR_MACHINE         0x7e1
#define CUSTOM_CSR_PCCR_MACHINE         0x7e2
#define CUSTOM_CSR_CPU_TESTBUS_CTRL     0x7e3
#define CUSTOM_CSR_PCER_USER            0x800
#define CUSTOM_CSR_PCMR_USER            0x801
#define CUSTOM_CSR_PCCR_USER            0x802
#define CUSTOM_CSR_GPIO_OEN_USER        0x803
#define CUSTOM_CSR_GPIO_IN_USER         0x804
#define CUSTOM_CSR_GPIO_OUT_USER        0x805
#define CUSTOM_CSR_CO_EXCEPTION_CAUSE   0x7f0
#define CUSTOM_CSR_CO_HWLP              0x7f1
#define CUSTOM_CSR_CO_AIA               0x7f2

/* Defined outside this file; the critical-frame pointer shared with the
 * low-level (assembly) save/restore code. */
extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame;
  257. static void * cpu_domain_dev_sleep_frame_alloc_and_init(const cpu_domain_dev_regs_region_t *regions, const int region_num)
  258. {
  259. const int region_sz = sizeof(cpu_domain_dev_regs_region_t) * region_num;
  260. int regs_frame_sz = 0;
  261. for (int num = 0; num < region_num; num++) {
  262. regs_frame_sz += regions[num].end - regions[num].start;
  263. }
  264. void *frame = heap_caps_malloc(sizeof(cpu_domain_dev_sleep_frame_t) + region_sz + regs_frame_sz, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
  265. if (frame) {
  266. cpu_domain_dev_regs_region_t *region = (cpu_domain_dev_regs_region_t *)(frame + sizeof(cpu_domain_dev_sleep_frame_t));
  267. memcpy(region, regions, region_num * sizeof(cpu_domain_dev_regs_region_t));
  268. void *regs_frame = frame + sizeof(cpu_domain_dev_sleep_frame_t) + region_sz;
  269. memset(regs_frame, 0, regs_frame_sz);
  270. *(cpu_domain_dev_sleep_frame_t *)frame = (cpu_domain_dev_sleep_frame_t) {
  271. .region = region,
  272. .region_num = region_num,
  273. .regs_frame = (uint32_t *)regs_frame
  274. };
  275. }
  276. return frame;
  277. }
/* Allocate the backup frame for the INTPRI (interrupt priority/enable)
 * register block; split into two regions, with the RND_ECO_HIGH register
 * covered separately from the main range. */
static inline void * cpu_domain_intpri_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
        { .start = INTPRI_CORE0_CPU_INT_ENABLE_REG, .end = INTPRI_RND_ECO_LOW_REG + 4 },
        { .start = INTPRI_RND_ECO_HIGH_REG, .end = INTPRI_RND_ECO_HIGH_REG + 4 }
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}
/* Allocate the backup frame for the cache control registers. The register
 * names differ between ESP32-C6 (EXTMEM_*) and ESP32-H2 (CACHE_*). */
static inline void * cpu_domain_cache_config_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
#if CONFIG_IDF_TARGET_ESP32C6
        { .start = EXTMEM_L1_CACHE_CTRL_REG, .end = EXTMEM_L1_CACHE_CTRL_REG + 4 },
        { .start = EXTMEM_L1_CACHE_WRAP_AROUND_CTRL_REG, .end = EXTMEM_L1_CACHE_WRAP_AROUND_CTRL_REG + 4 }
#elif CONFIG_IDF_TARGET_ESP32H2
        { .start = CACHE_L1_CACHE_CTRL_REG, .end = CACHE_L1_CACHE_CTRL_REG + 4 },
        { .start = CACHE_L1_CACHE_WRAP_AROUND_CTRL_REG, .end = CACHE_L1_CACHE_WRAP_AROUND_CTRL_REG + 4 }
#endif
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}
/* Allocate the backup frame for the PLIC (platform-level interrupt
 * controller): machine-mode and user-mode enable/claim/config registers. */
static inline void * cpu_domain_plic_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
        { .start = PLIC_MXINT_ENABLE_REG, .end = PLIC_MXINT_CLAIM_REG + 4 },
        { .start = PLIC_MXINT_CONF_REG, .end = PLIC_MXINT_CONF_REG + 4 },
        { .start = PLIC_UXINT_ENABLE_REG, .end = PLIC_UXINT_CLAIM_REG + 4 },
        { .start = PLIC_UXINT_CONF_REG, .end = PLIC_UXINT_CONF_REG + 4 }
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}
/* Allocate the backup frame for the CLINT (core-local interruptor):
 * machine-mode and user-mode software-interrupt and timer-compare registers. */
static inline void * cpu_domain_clint_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
        { .start = CLINT_MINT_SIP_REG, .end = CLINT_MINT_MTIMECMP_H_REG + 4 },
        { .start = CLINT_UINT_SIP_REG, .end = CLINT_UINT_UTIMECMP_H_REG + 4 }
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}
/**
 * Allocate every frame required for software CPU retention: the critical
 * and non-critical core register frames and the INTPRI, cache-config, PLIC
 * and CLINT peripheral backup frames. Idempotent — frames that already
 * exist are kept.
 *
 * @return ESP_OK on success, or ESP_ERR_NO_MEM after rolling back any
 *         partially completed allocations via esp_sleep_cpu_retention_deinit().
 */
static esp_err_t esp_sleep_cpu_retention_init_impl(void)
{
    if (s_cpu_retention.retent.critical_frame == NULL) {
        void *frame = heap_caps_calloc(1, RV_SLEEP_CTX_FRMSZ, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.critical_frame = (RvCoreCriticalSleepFrame *)frame;
        /* Publish the frame to the low-level save/restore code. */
        rv_core_critical_regs_frame = (RvCoreCriticalSleepFrame *)frame;
    }
    if (s_cpu_retention.retent.non_critical_frame == NULL) {
        void *frame = heap_caps_calloc(1, sizeof(RvCoreNonCriticalSleepFrame), MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.non_critical_frame = (RvCoreNonCriticalSleepFrame *)frame;
    }
    if (s_cpu_retention.retent.intpri_frame == NULL) {
        void *frame = cpu_domain_intpri_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.intpri_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    if (s_cpu_retention.retent.cache_config_frame == NULL) {
        void *frame = cpu_domain_cache_config_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.cache_config_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    if (s_cpu_retention.retent.plic_frame == NULL) {
        void *frame = cpu_domain_plic_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.plic_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    if (s_cpu_retention.retent.clint_frame == NULL) {
        void *frame = cpu_domain_clint_sleep_frame_alloc_and_init();
        if (frame == NULL) {
            goto err;
        }
        s_cpu_retention.retent.clint_frame = (cpu_domain_dev_sleep_frame_t *)frame;
    }
    return ESP_OK;

err:
    /* Free whatever was allocated before the failure. */
    esp_sleep_cpu_retention_deinit();
    return ESP_ERR_NO_MEM;
}
  367. static esp_err_t esp_sleep_cpu_retention_deinit_impl(void)
  368. {
  369. if (s_cpu_retention.retent.critical_frame) {
  370. heap_caps_free((void *)s_cpu_retention.retent.critical_frame);
  371. s_cpu_retention.retent.critical_frame = NULL;
  372. rv_core_critical_regs_frame = NULL;
  373. }
  374. if (s_cpu_retention.retent.non_critical_frame) {
  375. heap_caps_free((void *)s_cpu_retention.retent.non_critical_frame);
  376. s_cpu_retention.retent.non_critical_frame = NULL;
  377. }
  378. if (s_cpu_retention.retent.intpri_frame) {
  379. heap_caps_free((void *)s_cpu_retention.retent.intpri_frame);
  380. s_cpu_retention.retent.intpri_frame = NULL;
  381. }
  382. if (s_cpu_retention.retent.cache_config_frame) {
  383. heap_caps_free((void *)s_cpu_retention.retent.cache_config_frame);
  384. s_cpu_retention.retent.cache_config_frame = NULL;
  385. }
  386. if (s_cpu_retention.retent.plic_frame) {
  387. heap_caps_free((void *)s_cpu_retention.retent.plic_frame);
  388. s_cpu_retention.retent.plic_frame = NULL;
  389. }
  390. if (s_cpu_retention.retent.clint_frame) {
  391. heap_caps_free((void *)s_cpu_retention.retent.clint_frame);
  392. s_cpu_retention.retent.clint_frame = NULL;
  393. }
  394. return ESP_OK;
  395. }
/* Read mstatus, then clear bit 3 (MIE, machine global interrupt enable)
 * with csrci, returning the original value so the caller can restore the
 * interrupt state later with restore_mstatus(). */
static inline IRAM_ATTR uint32_t save_mstatus_and_disable_global_int(void)
{
    uint32_t mstatus;
    __asm__ __volatile__ (
            "csrr %0, mstatus\n"
            "csrci mstatus, 0x8\n"
            : "=r"(mstatus)
    );
    return mstatus;
}
/* Write back a previously saved mstatus value (re-enabling global
 * interrupts if MIE was set at save time). */
static inline IRAM_ATTR void restore_mstatus(uint32_t mstatus)
{
    __asm__ __volatile__ ("csrw mstatus, %0\n" :: "r"(mstatus));
}
  410. static IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void)
  411. {
  412. assert(s_cpu_retention.retent.non_critical_frame);
  413. RvCoreNonCriticalSleepFrame *frame = s_cpu_retention.retent.non_critical_frame;
  414. frame->mscratch = RV_READ_CSR(mscratch);
  415. frame->mideleg = RV_READ_CSR(mideleg);
  416. frame->misa = RV_READ_CSR(misa);
  417. frame->tselect = RV_READ_CSR(tselect);
  418. frame->tdata1 = RV_READ_CSR(tdata1);
  419. frame->tdata2 = RV_READ_CSR(tdata2);
  420. frame->tcontrol = RV_READ_CSR(tcontrol);
  421. frame->pmpaddr0 = RV_READ_CSR(pmpaddr0);
  422. frame->pmpaddr1 = RV_READ_CSR(pmpaddr1);
  423. frame->pmpaddr2 = RV_READ_CSR(pmpaddr2);
  424. frame->pmpaddr3 = RV_READ_CSR(pmpaddr3);
  425. frame->pmpaddr4 = RV_READ_CSR(pmpaddr4);
  426. frame->pmpaddr5 = RV_READ_CSR(pmpaddr5);
  427. frame->pmpaddr6 = RV_READ_CSR(pmpaddr6);
  428. frame->pmpaddr7 = RV_READ_CSR(pmpaddr7);
  429. frame->pmpaddr8 = RV_READ_CSR(pmpaddr8);
  430. frame->pmpaddr9 = RV_READ_CSR(pmpaddr9);
  431. frame->pmpaddr10 = RV_READ_CSR(pmpaddr10);
  432. frame->pmpaddr11 = RV_READ_CSR(pmpaddr11);
  433. frame->pmpaddr12 = RV_READ_CSR(pmpaddr12);
  434. frame->pmpaddr13 = RV_READ_CSR(pmpaddr13);
  435. frame->pmpaddr14 = RV_READ_CSR(pmpaddr14);
  436. frame->pmpaddr15 = RV_READ_CSR(pmpaddr15);
  437. frame->pmpcfg0 = RV_READ_CSR(pmpcfg0);
  438. frame->pmpcfg1 = RV_READ_CSR(pmpcfg1);
  439. frame->pmpcfg2 = RV_READ_CSR(pmpcfg2);
  440. frame->pmpcfg3 = RV_READ_CSR(pmpcfg3);
  441. #if SOC_CPU_HAS_PMA
  442. frame->pmaaddr0 = RV_READ_CSR(CSR_PMAADDR(0));
  443. frame->pmaaddr1 = RV_READ_CSR(CSR_PMAADDR(1));
  444. frame->pmaaddr2 = RV_READ_CSR(CSR_PMAADDR(2));
  445. frame->pmaaddr3 = RV_READ_CSR(CSR_PMAADDR(3));
  446. frame->pmaaddr4 = RV_READ_CSR(CSR_PMAADDR(4));
  447. frame->pmaaddr5 = RV_READ_CSR(CSR_PMAADDR(5));
  448. frame->pmaaddr6 = RV_READ_CSR(CSR_PMAADDR(6));
  449. frame->pmaaddr7 = RV_READ_CSR(CSR_PMAADDR(7));
  450. frame->pmaaddr8 = RV_READ_CSR(CSR_PMAADDR(8));
  451. frame->pmaaddr9 = RV_READ_CSR(CSR_PMAADDR(9));
  452. frame->pmaaddr10 = RV_READ_CSR(CSR_PMAADDR(10));
  453. frame->pmaaddr11 = RV_READ_CSR(CSR_PMAADDR(11));
  454. frame->pmaaddr12 = RV_READ_CSR(CSR_PMAADDR(12));
  455. frame->pmaaddr13 = RV_READ_CSR(CSR_PMAADDR(13));
  456. frame->pmaaddr14 = RV_READ_CSR(CSR_PMAADDR(14));
  457. frame->pmaaddr15 = RV_READ_CSR(CSR_PMAADDR(15));
  458. frame->pmacfg0 = RV_READ_CSR(CSR_PMACFG(0));
  459. frame->pmacfg1 = RV_READ_CSR(CSR_PMACFG(1));
  460. frame->pmacfg2 = RV_READ_CSR(CSR_PMACFG(2));
  461. frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(3));
  462. frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(4));
  463. frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(5));
  464. frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(6));
  465. frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(7));
  466. frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(8));
  467. frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(9));
  468. frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(10));
  469. frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(11));
  470. frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(12));
  471. frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(13));
  472. frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(14));
  473. frame->pmacfg3 = RV_READ_CSR(CSR_PMACFG(15));
  474. #endif // SOC_CPU_HAS_PMA
  475. frame->utvec = RV_READ_CSR(utvec);
  476. frame->ustatus = RV_READ_CSR(ustatus);
  477. frame->uepc = RV_READ_CSR(uepc);
  478. frame->ucause = RV_READ_CSR(ucause);
  479. frame->mpcer = RV_READ_CSR(CUSTOM_CSR_PCER_MACHINE);
  480. frame->mpcmr = RV_READ_CSR(CUSTOM_CSR_PCMR_MACHINE);
  481. frame->mpccr = RV_READ_CSR(CUSTOM_CSR_PCCR_MACHINE);
  482. frame->cpu_testbus_ctrl = RV_READ_CSR(CUSTOM_CSR_CPU_TESTBUS_CTRL);
  483. frame->upcer = RV_READ_CSR(CUSTOM_CSR_PCER_USER);
  484. frame->upcmr = RV_READ_CSR(CUSTOM_CSR_PCMR_USER);
  485. frame->upccr = RV_READ_CSR(CUSTOM_CSR_PCCR_USER);
  486. frame->ugpio_oen = RV_READ_CSR(CUSTOM_CSR_GPIO_OEN_USER);
  487. frame->ugpio_in = RV_READ_CSR(CUSTOM_CSR_GPIO_IN_USER);
  488. frame->ugpio_out = RV_READ_CSR(CUSTOM_CSR_GPIO_OUT_USER);
  489. return frame;
  490. }
/**
 * Restore all "non-critical" RISC-V core CSRs from the retention frame;
 * exact inverse of rv_core_noncritical_regs_save(). Runs from IRAM on the
 * wakeup path.
 *
 * @param frame the previously saved RvCoreNonCriticalSleepFrame (non-NULL)
 */
static IRAM_ATTR void rv_core_noncritical_regs_restore(RvCoreNonCriticalSleepFrame *frame)
{
    assert(frame);
    RV_WRITE_CSR(mscratch, frame->mscratch);
    RV_WRITE_CSR(mideleg, frame->mideleg);
    RV_WRITE_CSR(misa, frame->misa);
    /* Trigger/debug CSRs. */
    RV_WRITE_CSR(tselect, frame->tselect);
    RV_WRITE_CSR(tdata1, frame->tdata1);
    RV_WRITE_CSR(tdata2, frame->tdata2);
    RV_WRITE_CSR(tcontrol, frame->tcontrol);
    /* Physical Memory Protection (PMP) address and configuration CSRs. */
    RV_WRITE_CSR(pmpaddr0, frame->pmpaddr0);
    RV_WRITE_CSR(pmpaddr1, frame->pmpaddr1);
    RV_WRITE_CSR(pmpaddr2, frame->pmpaddr2);
    RV_WRITE_CSR(pmpaddr3, frame->pmpaddr3);
    RV_WRITE_CSR(pmpaddr4, frame->pmpaddr4);
    RV_WRITE_CSR(pmpaddr5, frame->pmpaddr5);
    RV_WRITE_CSR(pmpaddr6, frame->pmpaddr6);
    RV_WRITE_CSR(pmpaddr7, frame->pmpaddr7);
    RV_WRITE_CSR(pmpaddr8, frame->pmpaddr8);
    RV_WRITE_CSR(pmpaddr9, frame->pmpaddr9);
    RV_WRITE_CSR(pmpaddr10,frame->pmpaddr10);
    RV_WRITE_CSR(pmpaddr11,frame->pmpaddr11);
    RV_WRITE_CSR(pmpaddr12,frame->pmpaddr12);
    RV_WRITE_CSR(pmpaddr13,frame->pmpaddr13);
    RV_WRITE_CSR(pmpaddr14,frame->pmpaddr14);
    RV_WRITE_CSR(pmpaddr15,frame->pmpaddr15);
    RV_WRITE_CSR(pmpcfg0, frame->pmpcfg0);
    RV_WRITE_CSR(pmpcfg1, frame->pmpcfg1);
    RV_WRITE_CSR(pmpcfg2, frame->pmpcfg2);
    RV_WRITE_CSR(pmpcfg3, frame->pmpcfg3);
#if SOC_CPU_HAS_PMA
    /* Physical Memory Attribute (PMA) CSRs. */
    RV_WRITE_CSR(CSR_PMAADDR(0), frame->pmaaddr0);
    RV_WRITE_CSR(CSR_PMAADDR(1), frame->pmaaddr1);
    RV_WRITE_CSR(CSR_PMAADDR(2), frame->pmaaddr2);
    RV_WRITE_CSR(CSR_PMAADDR(3), frame->pmaaddr3);
    RV_WRITE_CSR(CSR_PMAADDR(4), frame->pmaaddr4);
    RV_WRITE_CSR(CSR_PMAADDR(5), frame->pmaaddr5);
    RV_WRITE_CSR(CSR_PMAADDR(6), frame->pmaaddr6);
    RV_WRITE_CSR(CSR_PMAADDR(7), frame->pmaaddr7);
    RV_WRITE_CSR(CSR_PMAADDR(8), frame->pmaaddr8);
    RV_WRITE_CSR(CSR_PMAADDR(9), frame->pmaaddr9);
    RV_WRITE_CSR(CSR_PMAADDR(10),frame->pmaaddr10);
    RV_WRITE_CSR(CSR_PMAADDR(11),frame->pmaaddr11);
    RV_WRITE_CSR(CSR_PMAADDR(12),frame->pmaaddr12);
    RV_WRITE_CSR(CSR_PMAADDR(13),frame->pmaaddr13);
    RV_WRITE_CSR(CSR_PMAADDR(14),frame->pmaaddr14);
    RV_WRITE_CSR(CSR_PMAADDR(15),frame->pmaaddr15);
    RV_WRITE_CSR(CSR_PMACFG(0), frame->pmacfg0);
    RV_WRITE_CSR(CSR_PMACFG(1), frame->pmacfg1);
    RV_WRITE_CSR(CSR_PMACFG(2), frame->pmacfg2);
    RV_WRITE_CSR(CSR_PMACFG(3), frame->pmacfg3);
    RV_WRITE_CSR(CSR_PMACFG(4), frame->pmacfg4);
    RV_WRITE_CSR(CSR_PMACFG(5), frame->pmacfg5);
    RV_WRITE_CSR(CSR_PMACFG(6), frame->pmacfg6);
    RV_WRITE_CSR(CSR_PMACFG(7), frame->pmacfg7);
    RV_WRITE_CSR(CSR_PMACFG(8), frame->pmacfg8);
    RV_WRITE_CSR(CSR_PMACFG(9), frame->pmacfg9);
    RV_WRITE_CSR(CSR_PMACFG(10), frame->pmacfg10);
    RV_WRITE_CSR(CSR_PMACFG(11), frame->pmacfg11);
    RV_WRITE_CSR(CSR_PMACFG(12), frame->pmacfg12);
    RV_WRITE_CSR(CSR_PMACFG(13), frame->pmacfg13);
    RV_WRITE_CSR(CSR_PMACFG(14), frame->pmacfg14);
    RV_WRITE_CSR(CSR_PMACFG(15), frame->pmacfg15);
#endif //SOC_CPU_HAS_PMA
    /* User-mode trap CSRs. */
    RV_WRITE_CSR(utvec, frame->utvec);
    RV_WRITE_CSR(ustatus, frame->ustatus);
    RV_WRITE_CSR(uepc, frame->uepc);
    RV_WRITE_CSR(ucause, frame->ucause);
    /* Vendor-defined CSRs (see CUSTOM_CSR_* definitions above). */
    RV_WRITE_CSR(CUSTOM_CSR_PCER_MACHINE, frame->mpcer);
    RV_WRITE_CSR(CUSTOM_CSR_PCMR_MACHINE, frame->mpcmr);
    RV_WRITE_CSR(CUSTOM_CSR_PCCR_MACHINE, frame->mpccr);
    RV_WRITE_CSR(CUSTOM_CSR_CPU_TESTBUS_CTRL, frame->cpu_testbus_ctrl);
    RV_WRITE_CSR(CUSTOM_CSR_PCER_USER, frame->upcer);
    RV_WRITE_CSR(CUSTOM_CSR_PCMR_USER, frame->upcmr);
    RV_WRITE_CSR(CUSTOM_CSR_PCCR_USER, frame->upccr);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_OEN_USER,frame->ugpio_oen);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_IN_USER, frame->ugpio_in);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_OUT_USER,frame->ugpio_out);
}
  570. static IRAM_ATTR void cpu_domain_dev_regs_save(cpu_domain_dev_sleep_frame_t *frame)
  571. {
  572. assert(frame);
  573. cpu_domain_dev_regs_region_t *region = frame->region;
  574. uint32_t *regs_frame = frame->regs_frame;
  575. int offset = 0;
  576. for (int i = 0; i < frame->region_num; i++) {
  577. for (uint32_t addr = region[i].start; addr < region[i].end; addr+=4) {
  578. regs_frame[offset++] = *(uint32_t *)addr;
  579. }
  580. }
  581. }
  582. static IRAM_ATTR void cpu_domain_dev_regs_restore(cpu_domain_dev_sleep_frame_t *frame)
  583. {
  584. assert(frame);
  585. cpu_domain_dev_regs_region_t *region = frame->region;
  586. uint32_t *regs_frame = frame->regs_frame;
  587. int offset = 0;
  588. for (int i = 0; i < frame->region_num; i++) {
  589. for (uint32_t addr = region[i].start; addr < region[i].end; addr+=4) {
  590. *(uint32_t *)addr = regs_frame[offset++];
  591. }
  592. }
  593. }
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
/* Compute the little-endian CRC32 over the first frame_check_size bytes of
 * the frame and store it at frame_crc_ptr. */
static void update_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr)
{
    *(frame_crc_ptr) = esp_crc32_le(0, (void *)frame_ptr, frame_check_size);
}
/* Verify the stored frame CRC. On mismatch the retention frame cannot be
 * trusted, so this forces XON on the enabled UARTs, logs the error, and
 * restarts the chip without returning. */
static void validate_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr)
{
    if (*(frame_crc_ptr) != esp_crc32_le(0, (void *)(frame_ptr), frame_check_size)) {
        // resume uarts
        for (int i = 0; i < SOC_UART_NUM; ++i) {
#ifndef CONFIG_IDF_TARGET_ESP32
            /* Skip UARTs whose peripheral clock is gated off. */
            if (!periph_ll_periph_enabled(PERIPH_UART0_MODULE + i)) {
                continue;
            }
#endif
            uart_ll_force_xon(i);
        }
        /* Since it is still in the critical now, use ESP_EARLY_LOG */
        ESP_EARLY_LOGE(TAG, "Sleep retention frame is corrupted");
        esp_restart_noos();
    }
}
  616. #endif
/* Implemented outside this file (low-level sleep code): save/restore of
 * the critical core register set around the power-down. */
extern RvCoreCriticalSleepFrame * rv_core_critical_regs_save(void);
extern RvCoreCriticalSleepFrame * rv_core_critical_regs_restore(void);
typedef uint32_t (* sleep_cpu_entry_cb_t)(uint32_t, uint32_t, uint32_t, bool);

/**
 * Enter sleep with CPU context retention.
 *
 * rv_core_critical_regs_save() is reached twice (setjmp-like, judging by
 * the pmufunc check and the wake-stub setup — see rvsleep-frames.h for the
 * exact pmufunc encoding): once on the way down, where the low bits read
 * 0x1 and we program the wake stub and enter sleep, and once again after
 * wakeup via rv_core_critical_regs_restore(), where we fall through to
 * finish the PMU sleep sequence.
 */
static IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
        uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
    RvCoreCriticalSleepFrame * frame = rv_core_critical_regs_save();
    if ((frame->pmufunc & 0x3) == 0x1) {
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
        /* Minus 2 * sizeof(long) is for bypass `pmufunc` and `frame_crc` field */
        update_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif
        /* Point the wake stub at the context-restore routine before sleeping. */
        REG_WRITE(LIGHT_SLEEP_WAKE_STUB_ADDR_REG, (uint32_t)rv_core_critical_regs_restore);
        return (*goto_sleep)(wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);
    }
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    else {
        /* Wakeup path: verify the critical frame survived intact. */
        validate_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc));
    }
#endif
    return pmu_sleep_finish();
}
/**
 * Enter sleep with full software CPU retention: save the peripheral
 * register frames and the non-critical CSRs, sleep via do_cpu_retention(),
 * then restore everything in reverse order after wakeup. Global interrupts
 * (mstatus.MIE) are disabled for the entire sequence.
 *
 * @param goto_sleep        low-level sleep entry callback
 * @param wakeup_opt        wakeup option bits passed through to goto_sleep
 * @param reject_opt        sleep-reject option bits passed through
 * @param lslp_mem_inf_fpu  light-sleep memory/FPU option passed through
 * @param dslp              true for deep sleep
 * @return result propagated from the sleep entry / PMU finish path
 */
esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uint32_t, uint32_t, bool),
        uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
    uint32_t mstatus = save_mstatus_and_disable_global_int();
    cpu_domain_dev_regs_save(s_cpu_retention.retent.plic_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.clint_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.intpri_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.cache_config_frame);
    RvCoreNonCriticalSleepFrame *frame = rv_core_noncritical_regs_save();
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    /* Minus sizeof(long) is for bypass `frame_crc` field */
    update_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif
    esp_err_t err = do_cpu_retention(goto_sleep, wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);
#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
    validate_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc));
#endif
    /* Restore in reverse order of the saves above. */
    rv_core_noncritical_regs_restore(frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.cache_config_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.intpri_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.clint_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.plic_frame);
    restore_mstatus(mstatus);
    return err;
}
  664. #endif // SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
  665. #if SOC_PM_SUPPORT_CPU_PD
/**
 * Prepare CPU retention ahead of light sleep with CPU power-down,
 * dispatching to the RTC_CNTL (hardware DMA) or software implementation
 * depending on the target's retention mechanism.
 *
 * @return ESP_OK on success, otherwise the underlying error (e.g. ESP_ERR_NO_MEM)
 */
esp_err_t esp_sleep_cpu_retention_init(void)
{
    esp_err_t err = ESP_OK;
#if SOC_PM_CPU_RETENTION_BY_RTCCNTL
    err = esp_sleep_cpu_pd_low_init();
#elif SOC_PM_CPU_RETENTION_BY_SW
    err = esp_sleep_cpu_retention_init_impl();
#endif
    return err;
}
/**
 * Release all CPU retention resources, dispatching to the mechanism-specific
 * deinit (counterpart of esp_sleep_cpu_retention_init()).
 *
 * @return ESP_OK on success, otherwise the underlying error
 */
esp_err_t esp_sleep_cpu_retention_deinit(void)
{
    esp_err_t err = ESP_OK;
#if SOC_PM_CPU_RETENTION_BY_RTCCNTL
    err = esp_sleep_cpu_pd_low_deinit();
#elif SOC_PM_CPU_RETENTION_BY_SW
    err = esp_sleep_cpu_retention_deinit_impl();
#endif
    return err;
}
  686. bool cpu_domain_pd_allowed(void)
  687. {
  688. #if SOC_PM_CPU_RETENTION_BY_RTCCNTL
  689. return (s_cpu_retention.retent.cpu_pd_mem != NULL);
  690. #elif SOC_PM_CPU_RETENTION_BY_SW
  691. return (s_cpu_retention.retent.critical_frame != NULL) && \
  692. (s_cpu_retention.retent.non_critical_frame != NULL) && \
  693. (s_cpu_retention.retent.intpri_frame != NULL) && \
  694. (s_cpu_retention.retent.cache_config_frame != NULL) && \
  695. (s_cpu_retention.retent.plic_frame != NULL) && \
  696. (s_cpu_retention.retent.clint_frame != NULL);
  697. #else
  698. return false;
  699. #endif
  700. }
  701. esp_err_t sleep_cpu_configure(bool light_sleep_enable)
  702. {
  703. #if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP
  704. if (light_sleep_enable) {
  705. ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_init(), TAG, "Failed to enable CPU power down during light sleep.");
  706. } else {
  707. ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_deinit(), TAG, "Failed to release CPU retention memory");
  708. }
  709. #endif
  710. return ESP_OK;
  711. }
  712. #endif