sleep_cpu.c 25 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658
  1. /*
  2. * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <stddef.h>
  7. #include <string.h>
  8. #include <inttypes.h>
  9. #include <sys/lock.h>
  10. #include <sys/param.h>
  11. #include "esp_attr.h"
  12. #include "esp_check.h"
  13. #include "esp_sleep.h"
  14. #include "esp_log.h"
  15. #include "freertos/FreeRTOS.h"
  16. #include "freertos/task.h"
  17. #include "esp_heap_caps.h"
  18. #include "soc/soc_caps.h"
  19. #include "esp_private/sleep_cpu.h"
  20. #include "sdkconfig.h"
  21. #if !SOC_PMU_SUPPORTED
  22. #include "hal/rtc_hal.h"
  23. #endif
  24. #include "soc/rtc_periph.h"
  25. #ifdef CONFIG_IDF_TARGET_ESP32S3
  26. #include "esp32s3/rom/cache.h"
  27. #elif CONFIG_IDF_TARGET_ESP32C6
  28. #include "esp32c6/rom/rtc.h"
  29. #include "riscv/rvsleep-frames.h"
  30. #include "soc/intpri_reg.h"
  31. #include "soc/extmem_reg.h"
  32. #include "soc/plic_reg.h"
  33. #include "soc/clint_reg.h"
  34. #include "esp32c6/rom/cache.h"
  35. #endif
  36. static __attribute__((unused)) const char *TAG = "sleep";
/* A half-open [start, end) range of 32-bit peripheral register addresses to
 * back up across a CPU power-down. `end` is exclusive; the save/restore loops
 * walk the range in 4-byte steps, so addresses are expected to be word
 * aligned. */
typedef struct {
    uint32_t start;
    uint32_t end;
} cpu_domain_dev_regs_region_t;

/* Backup frame for one CPU-domain peripheral: the register regions it covers
 * plus the RAM buffer their values are copied into (before sleep) and out of
 * (after wakeup). */
typedef struct {
    cpu_domain_dev_regs_region_t *region;   /* array of register regions */
    int region_num;                         /* number of entries in `region` */
    uint32_t *regs_frame;                   /* backup storage, one word per register */
} cpu_domain_dev_sleep_frame_t;
  46. /**
  47. * Internal structure which holds all requested light sleep cpu retention parameters
  48. */
/**
 * Internal structure which holds all requested light sleep cpu retention parameters
 */
typedef struct {
#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    /* Hardware-assisted retention: RTC_CNTL / RTC-DMA descriptor state. */
    rtc_cntl_sleep_retent_t retent;
#elif SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
    /* Software retention: RISC-V core register frames plus one backup frame
     * per CPU-domain peripheral that loses state when the CPU powers down. */
    struct {
        RvCoreCriticalSleepFrame *critical_frame;         /* core regs saved last, by the low-level save path */
        RvCoreNonCriticalSleepFrame *non_critical_frame;  /* CSRs saved earlier (PMP, triggers, custom CSRs, ...) */
        cpu_domain_dev_sleep_frame_t *intpri_frame;       /* INTPRI register backup */
        cpu_domain_dev_sleep_frame_t *cache_config_frame; /* L1 cache control register backup */
        cpu_domain_dev_sleep_frame_t *plic_frame;         /* PLIC register backup */
        cpu_domain_dev_sleep_frame_t *clint_frame;        /* CLINT register backup */
    } retent;
#endif
} sleep_cpu_retention_t;
  63. static DRAM_ATTR __attribute__((unused)) sleep_cpu_retention_t s_cpu_retention;
  64. #if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
  65. #if CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
  66. static uint32_t cache_tagmem_retention_setup(uint32_t code_seg_vaddr, uint32_t code_seg_size, uint32_t data_seg_vaddr, uint32_t data_seg_size)
  67. {
  68. uint32_t sets; /* i/d-cache total set counts */
  69. uint32_t index; /* virtual address mapping i/d-cache row offset */
  70. uint32_t waysgrp;
  71. uint32_t icache_tagmem_blk_gs, dcache_tagmem_blk_gs;
  72. struct cache_mode imode = { .icache = 1 };
  73. struct cache_mode dmode = { .icache = 0 };
  74. /* calculate/prepare i-cache tag memory retention parameters */
  75. Cache_Get_Mode(&imode);
  76. sets = imode.cache_size / imode.cache_ways / imode.cache_line_size;
  77. index = (code_seg_vaddr / imode.cache_line_size) % sets;
  78. waysgrp = imode.cache_ways >> 2;
  79. code_seg_size = ALIGNUP(imode.cache_line_size, code_seg_size);
  80. s_cpu_retention.retent.tagmem.icache.start_point = index;
  81. s_cpu_retention.retent.tagmem.icache.size = (sets * waysgrp) & 0xff;
  82. s_cpu_retention.retent.tagmem.icache.vld_size = s_cpu_retention.retent.tagmem.icache.size;
  83. if (code_seg_size < imode.cache_size / imode.cache_ways) {
  84. s_cpu_retention.retent.tagmem.icache.vld_size = (code_seg_size / imode.cache_line_size) * waysgrp;
  85. }
  86. s_cpu_retention.retent.tagmem.icache.enable = (code_seg_size != 0) ? 1 : 0;
  87. icache_tagmem_blk_gs = s_cpu_retention.retent.tagmem.icache.vld_size ? s_cpu_retention.retent.tagmem.icache.vld_size : sets * waysgrp;
  88. icache_tagmem_blk_gs = ALIGNUP(4, icache_tagmem_blk_gs);
  89. ESP_LOGD(TAG, "I-cache size:%d KiB, line size:%d B, ways:%d, sets:%d, index:%d, tag block groups:%d", (imode.cache_size>>10),
  90. imode.cache_line_size, imode.cache_ways, sets, index, icache_tagmem_blk_gs);
  91. /* calculate/prepare d-cache tag memory retention parameters */
  92. Cache_Get_Mode(&dmode);
  93. sets = dmode.cache_size / dmode.cache_ways / dmode.cache_line_size;
  94. index = (data_seg_vaddr / dmode.cache_line_size) % sets;
  95. waysgrp = dmode.cache_ways >> 2;
  96. data_seg_size = ALIGNUP(dmode.cache_line_size, data_seg_size);
  97. s_cpu_retention.retent.tagmem.dcache.start_point = index;
  98. s_cpu_retention.retent.tagmem.dcache.size = (sets * waysgrp) & 0x1ff;
  99. s_cpu_retention.retent.tagmem.dcache.vld_size = s_cpu_retention.retent.tagmem.dcache.size;
  100. #ifndef CONFIG_ESP32S3_DATA_CACHE_16KB
  101. if (data_seg_size < dmode.cache_size / dmode.cache_ways) {
  102. s_cpu_retention.retent.tagmem.dcache.vld_size = (data_seg_size / dmode.cache_line_size) * waysgrp;
  103. }
  104. s_cpu_retention.retent.tagmem.dcache.enable = (data_seg_size != 0) ? 1 : 0;
  105. #else
  106. s_cpu_retention.retent.tagmem.dcache.enable = 1;
  107. #endif
  108. dcache_tagmem_blk_gs = s_cpu_retention.retent.tagmem.dcache.vld_size ? s_cpu_retention.retent.tagmem.dcache.vld_size : sets * waysgrp;
  109. dcache_tagmem_blk_gs = ALIGNUP(4, dcache_tagmem_blk_gs);
  110. ESP_LOGD(TAG, "D-cache size:%d KiB, line size:%d B, ways:%d, sets:%d, index:%d, tag block groups:%d", (dmode.cache_size>>10),
  111. dmode.cache_line_size, dmode.cache_ways, sets, index, dcache_tagmem_blk_gs);
  112. /* For I or D cache tagmem retention, backup and restore are performed through
  113. * RTC DMA (its bus width is 128 bits), For I/D Cache tagmem blocks (i-cache
  114. * tagmem blocks = 92 bits, d-cache tagmem blocks = 88 bits), RTC DMA automatically
  115. * aligns its bit width to 96 bits, therefore, 3 times RTC DMA can transfer 4
  116. * i/d-cache tagmem blocks (128 bits * 3 = 96 bits * 4) */
  117. return (((icache_tagmem_blk_gs + dcache_tagmem_blk_gs) << 2) * 3);
  118. }
  119. #endif // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
/* Allocate (once) the RTC-DMA backup buffer and link descriptor used to
 * retain the I/D-cache tag memory across a light-sleep CPU power-down.
 * When CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP is disabled this only
 * clears the tagmem retention state.
 * @return ESP_OK on success, ESP_ERR_NO_MEM if the backup buffer cannot be
 *         allocated (tagmem retention is left disabled in that case). */
static esp_err_t esp_sleep_tagmem_pd_low_init(void)
{
#if CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
    if (s_cpu_retention.retent.tagmem.link_addr == NULL) {
        /* Text segment bounds come from linker-script symbols. */
        extern char _stext[], _etext[];
        uint32_t code_start = (uint32_t)_stext;
        uint32_t code_size = (uint32_t)(_etext - _stext);
#if !(CONFIG_SPIRAM && CONFIG_SOC_PM_SUPPORT_TAGMEM_PD)
        extern char _rodata_start[], _rodata_reserved_end[];
        uint32_t data_start = (uint32_t)_rodata_start;
        uint32_t data_size = (uint32_t)(_rodata_reserved_end - _rodata_start);
#else
        /* With SPIRAM, cover the whole external data address range. */
        uint32_t data_start = SOC_DROM_LOW;
        uint32_t data_size = SOC_EXTRAM_DATA_SIZE;
#endif
        ESP_LOGI(TAG, "Code start at 0x%08"PRIx32", total %"PRIu32", data start at 0x%08"PRIx32", total %"PRIu32" Bytes",
                code_start, code_size, data_start, data_size);
        uint32_t tagmem_sz = cache_tagmem_retention_setup(code_start, code_size, data_start, data_size);
        /* One allocation holds the DMA link node (first
         * RTC_HAL_DMA_LINK_NODE_SIZE bytes) followed by the tagmem backup
         * area; it must satisfy the RTC-DMA address alignment. */
        void *buf = heap_caps_aligned_calloc(SOC_RTC_CNTL_TAGMEM_PD_DMA_ADDR_ALIGN, 1,
                                             tagmem_sz + RTC_HAL_DMA_LINK_NODE_SIZE,
                                             MALLOC_CAP_RETENTION);
        if (buf) {
            s_cpu_retention.retent.tagmem.link_addr = rtc_cntl_hal_dma_link_init(buf,
                    buf + RTC_HAL_DMA_LINK_NODE_SIZE, tagmem_sz, NULL);
        } else {
            /* Allocation failed: make sure tagmem retention is fully off. */
            s_cpu_retention.retent.tagmem.icache.enable = 0;
            s_cpu_retention.retent.tagmem.dcache.enable = 0;
            s_cpu_retention.retent.tagmem.link_addr = NULL;
            return ESP_ERR_NO_MEM;
        }
    }
#else // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
    s_cpu_retention.retent.tagmem.icache.enable = 0;
    s_cpu_retention.retent.tagmem.dcache.enable = 0;
    s_cpu_retention.retent.tagmem.link_addr = NULL;
#endif // CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP
    return ESP_OK;
}
  158. static esp_err_t esp_sleep_tagmem_pd_low_deinit(void)
  159. {
  160. #if SOC_PM_SUPPORT_TAGMEM_PD
  161. if (s_cpu_retention.retent.tagmem.link_addr) {
  162. heap_caps_free(s_cpu_retention.retent.tagmem.link_addr);
  163. s_cpu_retention.retent.tagmem.icache.enable = 0;
  164. s_cpu_retention.retent.tagmem.dcache.enable = 0;
  165. s_cpu_retention.retent.tagmem.link_addr = NULL;
  166. }
  167. #endif
  168. return ESP_OK;
  169. }
  170. #endif // SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
  171. #if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
/* Allocate (once) the RTC-DMA backup memory and link descriptor used to
 * retain the CPU state across a light-sleep power-down, then set up tag
 * memory retention on top of it.
 * @return ESP_OK on success, ESP_ERR_NO_MEM on allocation failure. */
esp_err_t esp_sleep_cpu_pd_low_init(void)
{
    if (s_cpu_retention.retent.cpu_pd_mem == NULL) {
        /* One allocation: DMA link node first, then the retention area. */
        void *buf = heap_caps_aligned_calloc(SOC_RTC_CNTL_CPU_PD_DMA_ADDR_ALIGN, 1,
                                             SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE + RTC_HAL_DMA_LINK_NODE_SIZE,
                                             MALLOC_CAP_RETENTION);
        if (buf) {
            s_cpu_retention.retent.cpu_pd_mem = rtc_cntl_hal_dma_link_init(buf,
                    buf + RTC_HAL_DMA_LINK_NODE_SIZE, SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE, NULL);
        } else {
            return ESP_ERR_NO_MEM;
        }
    }
#if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    if (esp_sleep_tagmem_pd_low_init() != ESP_OK) {
#ifdef CONFIG_ESP32S3_DATA_CACHE_16KB
        /* With the 16 KB data cache, tagmem backup is required, so a tagmem
         * allocation failure fails the whole CPU retention setup. */
        esp_sleep_cpu_pd_low_deinit();
        return ESP_ERR_NO_MEM;
#endif
        /* Otherwise the failure is tolerated as best-effort:
         * esp_sleep_tagmem_pd_low_init() already disabled tagmem retention,
         * so this branch is intentionally empty. */
    }
#endif
    return ESP_OK;
}
/* Free the CPU retention memory and tear down tag memory retention.
 * @return ESP_OK (the error path below is not reachable in practice). */
esp_err_t esp_sleep_cpu_pd_low_deinit(void)
{
    if (s_cpu_retention.retent.cpu_pd_mem) {
        heap_caps_free(s_cpu_retention.retent.cpu_pd_mem);
        s_cpu_retention.retent.cpu_pd_mem = NULL;
    }
#if SOC_PM_SUPPORT_TAGMEM_PD && SOC_PM_CPU_RETENTION_BY_RTCCNTL
    if (esp_sleep_tagmem_pd_low_deinit() != ESP_OK) {
#ifdef CONFIG_ESP32S3_DATA_CACHE_16KB
        /* NOTE(review): esp_sleep_tagmem_pd_low_deinit() always returns
         * ESP_OK in this file, so this recursive cleanup branch is dead
         * code; it mirrors the init path's error handling. */
        esp_sleep_cpu_pd_low_deinit();
        return ESP_ERR_NO_MEM;
#endif
    }
#endif
    return ESP_OK;
}
/* Arm hardware (RTC_CNTL) CPU retention — and tagmem retention where
 * supported — using the previously allocated backup buffers. Called before
 * entering light sleep. */
void sleep_enable_cpu_retention(void)
{
    rtc_cntl_hal_enable_cpu_retention(&s_cpu_retention.retent);
#if SOC_PM_SUPPORT_TAGMEM_PD
    rtc_cntl_hal_enable_tagmem_retention(&s_cpu_retention.retent);
#endif
}
/* Disarm hardware CPU (and tagmem) retention after wakeup. Runs from IRAM
 * since it executes while flash cache state is being restored. */
void IRAM_ATTR sleep_disable_cpu_retention(void)
{
    rtc_cntl_hal_disable_cpu_retention(&s_cpu_retention.retent);
#if SOC_PM_SUPPORT_TAGMEM_PD
    rtc_cntl_hal_disable_tagmem_retention(&s_cpu_retention.retent);
#endif
}
  225. #endif
  226. #if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
/* ESP32-C6 custom (vendor) CSR addresses saved/restored by the software
 * retention path. Names suggest performance-counter enable/mode/count
 * registers (PCER/PCMR/PCCR) and core test/GPIO controls — semantics per the
 * chip TRM. */
/* Machine-mode custom CSRs. */
#define CUSTOM_CSR_PCER_MACHINE     0x7e0
#define CUSTOM_CSR_PCMR_MACHINE     0x7e1
#define CUSTOM_CSR_PCCR_MACHINE     0x7e2
#define CUSTOM_CSR_CPU_TESTBUS_CTRL 0x7e3
/* User-mode custom CSRs (performance counters and fast GPIO access). */
#define CUSTOM_CSR_PCER_USER        0x800
#define CUSTOM_CSR_PCMR_USER        0x801
#define CUSTOM_CSR_PCCR_USER        0x802
#define CUSTOM_CSR_GPIO_OEN_USER    0x803
#define CUSTOM_CSR_GPIO_IN_USER     0x804
#define CUSTOM_CSR_GPIO_OUT_USER    0x805
/* Co-processor / extension state CSRs (defined here but not used below). */
#define CUSTOM_CSR_CO_EXCEPTION_CAUSE 0x7f0
#define CUSTOM_CSR_CO_HWLP          0x7f1
#define CUSTOM_CSR_CO_AIA           0x7f2

/* Critical register frame pointer shared with the low-level save/restore
 * code (rv_core_critical_regs_save/restore). */
extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame;
  241. static void * cpu_domain_dev_sleep_frame_alloc_and_init(const cpu_domain_dev_regs_region_t *regions, const int region_num)
  242. {
  243. const int region_sz = sizeof(cpu_domain_dev_regs_region_t) * region_num;
  244. int regs_frame_sz = 0;
  245. for (int num = 0; num < region_num; num++) {
  246. regs_frame_sz += regions[num].end - regions[num].start;
  247. }
  248. void *frame = heap_caps_malloc(sizeof(cpu_domain_dev_sleep_frame_t) + region_sz + regs_frame_sz, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
  249. if (frame) {
  250. cpu_domain_dev_regs_region_t *region = (cpu_domain_dev_regs_region_t *)(frame + sizeof(cpu_domain_dev_sleep_frame_t));
  251. memcpy(region, regions, region_num * sizeof(cpu_domain_dev_regs_region_t));
  252. void *regs_frame = frame + sizeof(cpu_domain_dev_sleep_frame_t) + region_sz;
  253. memset(regs_frame, 0, regs_frame_sz);
  254. *(cpu_domain_dev_sleep_frame_t *)frame = (cpu_domain_dev_sleep_frame_t) {
  255. .region = region,
  256. .region_num = region_num,
  257. .regs_frame = (uint32_t *)regs_frame
  258. };
  259. }
  260. return frame;
  261. }
/* Build the backup frame for the INTPRI (interrupt priority) registers.
 * NOTE(review): the first region assumes INTPRI_CORE0_CPU_INT_ENABLE_REG
 * through INTPRI_RND_ECO_LOW_REG are contiguous — confirm against the C6
 * register map if these headers change. */
static inline void * cpu_domain_intpri_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
        { .start = INTPRI_CORE0_CPU_INT_ENABLE_REG, .end = INTPRI_RND_ECO_LOW_REG + 4 },
        { .start = INTPRI_RND_ECO_HIGH_REG, .end = INTPRI_RND_ECO_HIGH_REG + 4 }
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}
/* Build the backup frame for the L1 cache configuration registers (two
 * single-register regions). */
static inline void * cpu_domain_cache_config_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
        { .start = EXTMEM_L1_CACHE_CTRL_REG, .end = EXTMEM_L1_CACHE_CTRL_REG + 4 },
        { .start = EXTMEM_L1_CACHE_WRAP_AROUND_CTRL_REG, .end = EXTMEM_L1_CACHE_WRAP_AROUND_CTRL_REG + 4 }
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}
/* Build the backup frame for the PLIC registers: machine- and user-mode
 * interrupt enable/type/priority/claim ranges plus the two CONF registers. */
static inline void * cpu_domain_plic_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
        { .start = PLIC_MXINT_ENABLE_REG, .end = PLIC_MXINT_CLAIM_REG + 4 },
        { .start = PLIC_MXINT_CONF_REG, .end = PLIC_MXINT_CONF_REG + 4 },
        { .start = PLIC_UXINT_ENABLE_REG, .end = PLIC_UXINT_CLAIM_REG + 4 },
        { .start = PLIC_UXINT_CONF_REG, .end = PLIC_UXINT_CONF_REG + 4 }
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}
/* Build the backup frame for the CLINT registers: machine- and user-mode
 * software interrupt and timer-compare register ranges. */
static inline void * cpu_domain_clint_sleep_frame_alloc_and_init(void)
{
    const static cpu_domain_dev_regs_region_t regions[] = {
        { .start = CLINT_MINT_SIP_REG, .end = CLINT_MINT_MTIMECMP_H_REG + 4 },
        { .start = CLINT_UINT_SIP_REG, .end = CLINT_UINT_UTIMECMP_H_REG + 4 }
    };
    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
}
  296. static esp_err_t esp_sleep_cpu_retention_init_impl(void)
  297. {
  298. if (s_cpu_retention.retent.critical_frame == NULL) {
  299. void *frame = heap_caps_calloc(1, RV_SLEEP_CTX_FRMSZ, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
  300. if (frame == NULL) {
  301. goto err;
  302. }
  303. s_cpu_retention.retent.critical_frame = (RvCoreCriticalSleepFrame *)frame;
  304. rv_core_critical_regs_frame = (RvCoreCriticalSleepFrame *)frame;
  305. }
  306. if (s_cpu_retention.retent.non_critical_frame == NULL) {
  307. void *frame = heap_caps_calloc(1, sizeof(RvCoreNonCriticalSleepFrame), MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
  308. if (frame == NULL) {
  309. goto err;
  310. }
  311. s_cpu_retention.retent.non_critical_frame = (RvCoreNonCriticalSleepFrame *)frame;
  312. }
  313. if (s_cpu_retention.retent.intpri_frame == NULL) {
  314. void *frame = cpu_domain_intpri_sleep_frame_alloc_and_init();
  315. if (frame == NULL) {
  316. goto err;
  317. }
  318. s_cpu_retention.retent.intpri_frame = (cpu_domain_dev_sleep_frame_t *)frame;
  319. }
  320. if (s_cpu_retention.retent.cache_config_frame == NULL) {
  321. void *frame = cpu_domain_cache_config_sleep_frame_alloc_and_init();
  322. if (frame == NULL) {
  323. goto err;
  324. }
  325. s_cpu_retention.retent.cache_config_frame = (cpu_domain_dev_sleep_frame_t *)frame;
  326. }
  327. if (s_cpu_retention.retent.plic_frame == NULL) {
  328. void *frame = cpu_domain_plic_sleep_frame_alloc_and_init();
  329. if (frame == NULL) {
  330. goto err;
  331. }
  332. s_cpu_retention.retent.plic_frame = (cpu_domain_dev_sleep_frame_t *)frame;
  333. }
  334. if (s_cpu_retention.retent.clint_frame == NULL) {
  335. void *frame = cpu_domain_clint_sleep_frame_alloc_and_init();
  336. if (frame == NULL) {
  337. goto err;
  338. }
  339. s_cpu_retention.retent.clint_frame = (cpu_domain_dev_sleep_frame_t *)frame;
  340. }
  341. return ESP_OK;
  342. err:
  343. esp_sleep_cpu_retention_deinit();
  344. return ESP_ERR_NO_MEM;
  345. }
  346. static esp_err_t esp_sleep_cpu_retention_deinit_impl(void)
  347. {
  348. if (s_cpu_retention.retent.critical_frame) {
  349. heap_caps_free((void *)s_cpu_retention.retent.critical_frame);
  350. s_cpu_retention.retent.critical_frame = NULL;
  351. rv_core_critical_regs_frame = NULL;
  352. }
  353. if (s_cpu_retention.retent.non_critical_frame) {
  354. heap_caps_free((void *)s_cpu_retention.retent.non_critical_frame);
  355. s_cpu_retention.retent.non_critical_frame = NULL;
  356. }
  357. if (s_cpu_retention.retent.intpri_frame) {
  358. heap_caps_free((void *)s_cpu_retention.retent.intpri_frame);
  359. s_cpu_retention.retent.intpri_frame = NULL;
  360. }
  361. if (s_cpu_retention.retent.cache_config_frame) {
  362. heap_caps_free((void *)s_cpu_retention.retent.cache_config_frame);
  363. s_cpu_retention.retent.cache_config_frame = NULL;
  364. }
  365. if (s_cpu_retention.retent.plic_frame) {
  366. heap_caps_free((void *)s_cpu_retention.retent.plic_frame);
  367. s_cpu_retention.retent.plic_frame = NULL;
  368. }
  369. if (s_cpu_retention.retent.clint_frame) {
  370. heap_caps_free((void *)s_cpu_retention.retent.clint_frame);
  371. s_cpu_retention.retent.clint_frame = NULL;
  372. }
  373. return ESP_OK;
  374. }
/* Read mstatus, then clear mstatus.MIE (bit 3, mask 0x8) to disable
 * machine-mode global interrupts. Returns the mstatus value as it was
 * BEFORE the clear, for later restoration via restore_mstatus(). */
static inline IRAM_ATTR uint32_t save_mstatus_and_disable_global_int(void)
{
    uint32_t mstatus;
    __asm__ __volatile__ (
            "csrr   %0, mstatus\n"
            "csrci  mstatus, 0x8\n"
            : "=r"(mstatus)
        );
    return mstatus;
}
/* Write the given value back to mstatus (re-enabling global interrupts if
 * MIE was set in the saved value). */
static inline IRAM_ATTR void restore_mstatus(uint32_t mstatus)
{
    __asm__ __volatile__ ("csrw mstatus, %0\n" :: "r"(mstatus));
}
/* Save the "non-critical" RISC-V CSRs into the preallocated frame:
 * scratch/delegation/ISA, trigger module, all 16 PMP entries, user-mode trap
 * state, and the chip's custom CSRs. Runs from IRAM; the frame must have
 * been allocated by esp_sleep_cpu_retention_init_impl() beforehand.
 * @return the filled frame (same pointer as the retention state holds). */
static IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void)
{
    assert(s_cpu_retention.retent.non_critical_frame);
    RvCoreNonCriticalSleepFrame *frame = s_cpu_retention.retent.non_critical_frame;
    frame->mscratch = RV_READ_CSR(mscratch);
    frame->mideleg  = RV_READ_CSR(mideleg);
    frame->misa     = RV_READ_CSR(misa);
    /* Debug/trigger module state. */
    frame->tselect  = RV_READ_CSR(tselect);
    frame->tdata1   = RV_READ_CSR(tdata1);
    frame->tdata2   = RV_READ_CSR(tdata2);
    frame->tcontrol = RV_READ_CSR(tcontrol);
    /* Physical memory protection configuration and addresses. */
    frame->pmpcfg0  = RV_READ_CSR(pmpcfg0);
    frame->pmpcfg1  = RV_READ_CSR(pmpcfg1);
    frame->pmpcfg2  = RV_READ_CSR(pmpcfg2);
    frame->pmpcfg3  = RV_READ_CSR(pmpcfg3);
    frame->pmpaddr0 = RV_READ_CSR(pmpaddr0);
    frame->pmpaddr1 = RV_READ_CSR(pmpaddr1);
    frame->pmpaddr2 = RV_READ_CSR(pmpaddr2);
    frame->pmpaddr3 = RV_READ_CSR(pmpaddr3);
    frame->pmpaddr4 = RV_READ_CSR(pmpaddr4);
    frame->pmpaddr5 = RV_READ_CSR(pmpaddr5);
    frame->pmpaddr6 = RV_READ_CSR(pmpaddr6);
    frame->pmpaddr7 = RV_READ_CSR(pmpaddr7);
    frame->pmpaddr8 = RV_READ_CSR(pmpaddr8);
    frame->pmpaddr9 = RV_READ_CSR(pmpaddr9);
    frame->pmpaddr10 = RV_READ_CSR(pmpaddr10);
    frame->pmpaddr11 = RV_READ_CSR(pmpaddr11);
    frame->pmpaddr12 = RV_READ_CSR(pmpaddr12);
    frame->pmpaddr13 = RV_READ_CSR(pmpaddr13);
    frame->pmpaddr14 = RV_READ_CSR(pmpaddr14);
    frame->pmpaddr15 = RV_READ_CSR(pmpaddr15);
    /* User-mode trap handling state. */
    frame->utvec   = RV_READ_CSR(utvec);
    frame->ustatus = RV_READ_CSR(ustatus);
    frame->uepc    = RV_READ_CSR(uepc);
    frame->ucause  = RV_READ_CSR(ucause);
    /* Vendor custom CSRs (see CUSTOM_CSR_* definitions above). */
    frame->mpcer = RV_READ_CSR(CUSTOM_CSR_PCER_MACHINE);
    frame->mpcmr = RV_READ_CSR(CUSTOM_CSR_PCMR_MACHINE);
    frame->mpccr = RV_READ_CSR(CUSTOM_CSR_PCCR_MACHINE);
    frame->cpu_testbus_ctrl = RV_READ_CSR(CUSTOM_CSR_CPU_TESTBUS_CTRL);
    frame->upcer = RV_READ_CSR(CUSTOM_CSR_PCER_USER);
    frame->upcmr = RV_READ_CSR(CUSTOM_CSR_PCMR_USER);
    frame->upccr = RV_READ_CSR(CUSTOM_CSR_PCCR_USER);
    frame->ugpio_oen = RV_READ_CSR(CUSTOM_CSR_GPIO_OEN_USER);
    frame->ugpio_in  = RV_READ_CSR(CUSTOM_CSR_GPIO_IN_USER);
    frame->ugpio_out = RV_READ_CSR(CUSTOM_CSR_GPIO_OUT_USER);
    return frame;
}
/* Restore every CSR captured by rv_core_noncritical_regs_save() from the
 * given frame, after wakeup. Runs from IRAM. */
static IRAM_ATTR void rv_core_noncritical_regs_restore(RvCoreNonCriticalSleepFrame *frame)
{
    assert(frame);
    RV_WRITE_CSR(mscratch, frame->mscratch);
    RV_WRITE_CSR(mideleg, frame->mideleg);
    RV_WRITE_CSR(misa, frame->misa);
    /* Debug/trigger module state. */
    RV_WRITE_CSR(tselect, frame->tselect);
    RV_WRITE_CSR(tdata1, frame->tdata1);
    RV_WRITE_CSR(tdata2, frame->tdata2);
    RV_WRITE_CSR(tcontrol, frame->tcontrol);
    /* Physical memory protection configuration and addresses. */
    RV_WRITE_CSR(pmpcfg0, frame->pmpcfg0);
    RV_WRITE_CSR(pmpcfg1, frame->pmpcfg1);
    RV_WRITE_CSR(pmpcfg2, frame->pmpcfg2);
    RV_WRITE_CSR(pmpcfg3, frame->pmpcfg3);
    RV_WRITE_CSR(pmpaddr0, frame->pmpaddr0);
    RV_WRITE_CSR(pmpaddr1, frame->pmpaddr1);
    RV_WRITE_CSR(pmpaddr2, frame->pmpaddr2);
    RV_WRITE_CSR(pmpaddr3, frame->pmpaddr3);
    RV_WRITE_CSR(pmpaddr4, frame->pmpaddr4);
    RV_WRITE_CSR(pmpaddr5, frame->pmpaddr5);
    RV_WRITE_CSR(pmpaddr6, frame->pmpaddr6);
    RV_WRITE_CSR(pmpaddr7, frame->pmpaddr7);
    RV_WRITE_CSR(pmpaddr8, frame->pmpaddr8);
    RV_WRITE_CSR(pmpaddr9, frame->pmpaddr9);
    RV_WRITE_CSR(pmpaddr10,frame->pmpaddr10);
    RV_WRITE_CSR(pmpaddr11,frame->pmpaddr11);
    RV_WRITE_CSR(pmpaddr12,frame->pmpaddr12);
    RV_WRITE_CSR(pmpaddr13,frame->pmpaddr13);
    RV_WRITE_CSR(pmpaddr14,frame->pmpaddr14);
    RV_WRITE_CSR(pmpaddr15,frame->pmpaddr15);
    /* User-mode trap handling state. */
    RV_WRITE_CSR(utvec, frame->utvec);
    RV_WRITE_CSR(ustatus, frame->ustatus);
    RV_WRITE_CSR(uepc, frame->uepc);
    RV_WRITE_CSR(ucause, frame->ucause);
    /* Vendor custom CSRs (see CUSTOM_CSR_* definitions above). */
    RV_WRITE_CSR(CUSTOM_CSR_PCER_MACHINE, frame->mpcer);
    RV_WRITE_CSR(CUSTOM_CSR_PCMR_MACHINE, frame->mpcmr);
    RV_WRITE_CSR(CUSTOM_CSR_PCCR_MACHINE, frame->mpccr);
    RV_WRITE_CSR(CUSTOM_CSR_CPU_TESTBUS_CTRL, frame->cpu_testbus_ctrl);
    RV_WRITE_CSR(CUSTOM_CSR_PCER_USER, frame->upcer);
    RV_WRITE_CSR(CUSTOM_CSR_PCMR_USER, frame->upcmr);
    RV_WRITE_CSR(CUSTOM_CSR_PCCR_USER, frame->upccr);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_OEN_USER,frame->ugpio_oen);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_IN_USER, frame->ugpio_in);
    RV_WRITE_CSR(CUSTOM_CSR_GPIO_OUT_USER,frame->ugpio_out);
}
  481. static IRAM_ATTR void cpu_domain_dev_regs_save(cpu_domain_dev_sleep_frame_t *frame)
  482. {
  483. assert(frame);
  484. cpu_domain_dev_regs_region_t *region = frame->region;
  485. uint32_t *regs_frame = frame->regs_frame;
  486. int offset = 0;
  487. for (int i = 0; i < frame->region_num; i++) {
  488. for (uint32_t addr = region[i].start; addr < region[i].end; addr+=4) {
  489. regs_frame[offset++] = *(uint32_t *)addr;
  490. }
  491. }
  492. }
  493. static IRAM_ATTR void cpu_domain_dev_regs_restore(cpu_domain_dev_sleep_frame_t *frame)
  494. {
  495. assert(frame);
  496. cpu_domain_dev_regs_region_t *region = frame->region;
  497. uint32_t *regs_frame = frame->regs_frame;
  498. int offset = 0;
  499. for (int i = 0; i < frame->region_num; i++) {
  500. for (uint32_t addr = region[i].start; addr < region[i].end; addr+=4) {
  501. *(uint32_t *)addr = regs_frame[offset++];
  502. }
  503. }
  504. }
/* Low-level save/restore of the critical core register frame, implemented
 * outside this file (the frame layout comes from riscv/rvsleep-frames.h). */
extern RvCoreCriticalSleepFrame * rv_core_critical_regs_save(void);
extern RvCoreCriticalSleepFrame * rv_core_critical_regs_restore(void);
/* Signature of the low-level sleep entry routine handed to
 * do_cpu_retention(): (wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp). */
typedef uint32_t (* sleep_cpu_entry_cb_t)(uint32_t, uint32_t, uint32_t, bool);
/* Save the critical register frame and enter sleep via `goto_sleep`.
 *
 * rv_core_critical_regs_save() effectively returns twice (setjmp-like): once
 * on the way down after saving, and once after wakeup when the wake stub
 * rv_core_critical_regs_restore() rebuilds the context. The low bits of
 * frame->pmufunc distinguish the two passes: only when they equal 0x1 do we
 * program the wake-stub address and actually call the sleep entry; otherwise
 * we are already on the post-wakeup pass and just return ESP_OK.
 * NOTE(review): the pmufunc flag encoding is inferred from this call site —
 * confirm against the rv_core_critical_regs_save() implementation. */
static IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
        uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
    RvCoreCriticalSleepFrame * frame = rv_core_critical_regs_save();
    if ((frame->pmufunc & 0x3) == 0x1) {
        REG_WRITE(LIGHT_SLEEP_WAKE_STUB_ADDR_REG, (uint32_t)rv_core_critical_regs_restore);
        return (*goto_sleep)(wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);
    }
    return ESP_OK;
}
/* Software CPU retention wrapped around a sleep request.
 *
 * Order matters: the peripheral frames and the non-critical CSRs are saved
 * before do_cpu_retention() (which saves the critical frame and sleeps), and
 * everything is restored in the reverse order after wakeup. Global
 * interrupts are disabled across the entire sequence.
 *
 * @param goto_sleep        low-level sleep entry function; its return value
 *                          is propagated to the caller
 * @param wakeup_opt        wakeup option bits forwarded to goto_sleep
 * @param reject_opt        sleep-reject option bits forwarded to goto_sleep
 * @param lslp_mem_inf_fpu  light-sleep memory/FPU option forwarded to goto_sleep
 * @param dslp              true for deep sleep, false for light sleep
 * @return result of the sleep request (ESP_OK on the post-wakeup pass) */
esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uint32_t, uint32_t, bool),
        uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
{
    uint32_t mstatus = save_mstatus_and_disable_global_int();
    /* wait cache idle */
    Cache_Freeze_ICache_Enable(CACHE_FREEZE_ACK_BUSY);
    Cache_Freeze_ICache_Disable();
    cpu_domain_dev_regs_save(s_cpu_retention.retent.plic_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.clint_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.intpri_frame);
    cpu_domain_dev_regs_save(s_cpu_retention.retent.cache_config_frame);
    RvCoreNonCriticalSleepFrame *frame = rv_core_noncritical_regs_save();
    esp_err_t err = do_cpu_retention(goto_sleep, wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);
    /* Wakeup path: restore in reverse order of the saves above. */
    rv_core_noncritical_regs_restore(frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.cache_config_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.intpri_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.clint_frame);
    cpu_domain_dev_regs_restore(s_cpu_retention.retent.plic_frame);
    restore_mstatus(mstatus);
    return err;
}
  539. #endif // SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
  540. #if SOC_PM_SUPPORT_CPU_PD
  541. esp_err_t esp_sleep_cpu_retention_init(void)
  542. {
  543. esp_err_t err = ESP_OK;
  544. #if SOC_PM_CPU_RETENTION_BY_RTCCNTL
  545. err = esp_sleep_cpu_pd_low_init();
  546. #elif SOC_PM_CPU_RETENTION_BY_SW
  547. err = esp_sleep_cpu_retention_init_impl();
  548. #endif
  549. return err;
  550. }
  551. esp_err_t esp_sleep_cpu_retention_deinit(void)
  552. {
  553. esp_err_t err = ESP_OK;
  554. #if SOC_PM_CPU_RETENTION_BY_RTCCNTL
  555. err = esp_sleep_cpu_pd_low_deinit();
  556. #elif SOC_PM_CPU_RETENTION_BY_SW
  557. err = esp_sleep_cpu_retention_deinit_impl();
  558. #endif
  559. return err;
  560. }
  561. bool cpu_domain_pd_allowed(void)
  562. {
  563. #if SOC_PM_CPU_RETENTION_BY_RTCCNTL
  564. return (s_cpu_retention.retent.cpu_pd_mem != NULL);
  565. #elif SOC_PM_CPU_RETENTION_BY_SW
  566. return (s_cpu_retention.retent.critical_frame != NULL) && \
  567. (s_cpu_retention.retent.non_critical_frame != NULL) && \
  568. (s_cpu_retention.retent.intpri_frame != NULL) && \
  569. (s_cpu_retention.retent.cache_config_frame != NULL) && \
  570. (s_cpu_retention.retent.plic_frame != NULL) && \
  571. (s_cpu_retention.retent.clint_frame != NULL);
  572. #else
  573. return false;
  574. #endif
  575. }
  576. esp_err_t sleep_cpu_configure(bool light_sleep_enable)
  577. {
  578. #if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP
  579. if (light_sleep_enable) {
  580. ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_init(), TAG, "Failed to enable CPU power down during light sleep.");
  581. } else {
  582. ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_deinit(), TAG, "Failed to release CPU retention memory");
  583. }
  584. #endif
  585. return ESP_OK;
  586. }
  587. #endif