| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247 |
- /*
- * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
- *
- * SPDX-License-Identifier: Apache-2.0
- */
- #include "soc/soc.h"
- #include "riscv/rvsleep-frames.h"
- #include "soc/soc_caps.h"
- #include "sdkconfig.h"
- #if !CONFIG_IDF_TARGET_ESP32C6 && !CONFIG_IDF_TARGET_ESP32H2
- #include "soc/lp_aon_reg.h"
- #include "soc/extmem_reg.h"
- #endif
- .section .data1,"aw"
- .global rv_core_critical_regs_frame
- .type rv_core_critical_regs_frame,@object
- .align 4
- /* Pointer to the RvCoreCriticalSleepFrame object that holds the saved CPU
- critical-register context across sleep. Initialized to 0 (null) here;
- presumably assigned by C code before sleep is entered -- confirm with the
- caller. The restore path checks it for 0 before dereferencing, but the
- save path does not. */
- rv_core_critical_regs_frame:
- .word 0
- /*
- --------------------------------------------------------------------------------
- This assembly subroutine saves the critical registers of the CPU core to
- internal RAM before sleep, and sets the PMU control flag to indicate that
- the system is going to sleep. On return, a0 holds the pointer to the
- memory object (RvCoreCriticalSleepFrame) that stores the CPU
- critical-register context.
-
- Clobbers: mscratch (used to stash t0 while t0 serves as frame pointer).
- Must be called from within a critical section (interrupts masked).
- --------------------------------------------------------------------------------
- */
- .section .iram1,"ax"
- .global rv_core_critical_regs_save
- .type rv_core_critical_regs_save,@function
- .align 4
- rv_core_critical_regs_save:
- /* arrived here in critical section. we need:
- save riscv core critical registers to RvCoreCriticalSleepFrame
- */
- csrw mscratch, t0 /* stash t0 in mscratch so t0 can hold the frame pointer */
- la t0, rv_core_critical_regs_frame
- lw t0, 0(t0) /* t0 = pointer to RvCoreCriticalSleepFrame object */
- sw ra, RV_SLP_CTX_RA(t0)
- sw sp, RV_SLP_CTX_SP(t0)
- sw gp, RV_SLP_CTX_GP(t0)
- sw tp, RV_SLP_CTX_TP(t0)
- sw t1, RV_SLP_CTX_T1(t0)
- sw t2, RV_SLP_CTX_T2(t0)
- sw s0, RV_SLP_CTX_S0(t0)
- sw s1, RV_SLP_CTX_S1(t0)
- /* a0 is caller saved, so it does not need to be saved, but it should be the
- pointer value of RvCoreCriticalSleepFrame for return.
- */
- mv a0, t0
- sw a0, RV_SLP_CTX_A0(t0) /* A0 slot = frame pointer, so restore returns it too */
- sw a1, RV_SLP_CTX_A1(t0)
- sw a2, RV_SLP_CTX_A2(t0)
- sw a3, RV_SLP_CTX_A3(t0)
- sw a4, RV_SLP_CTX_A4(t0)
- sw a5, RV_SLP_CTX_A5(t0)
- sw a6, RV_SLP_CTX_A6(t0)
- sw a7, RV_SLP_CTX_A7(t0)
- sw s2, RV_SLP_CTX_S2(t0)
- sw s3, RV_SLP_CTX_S3(t0)
- sw s4, RV_SLP_CTX_S4(t0)
- sw s5, RV_SLP_CTX_S5(t0)
- sw s6, RV_SLP_CTX_S6(t0)
- sw s7, RV_SLP_CTX_S7(t0)
- sw s8, RV_SLP_CTX_S8(t0)
- sw s9, RV_SLP_CTX_S9(t0)
- sw s10, RV_SLP_CTX_S10(t0)
- sw s11, RV_SLP_CTX_S11(t0)
- sw t3, RV_SLP_CTX_T3(t0)
- sw t4, RV_SLP_CTX_T4(t0)
- sw t5, RV_SLP_CTX_T5(t0)
- sw t6, RV_SLP_CTX_T6(t0)
- /* Save the machine-mode CSRs into the frame (t1..t3 are free: their
- original values are already stored above). */
- csrr t1, mstatus
- sw t1, RV_SLP_CTX_MSTATUS(t0)
- csrr t2, mtvec
- sw t2, RV_SLP_CTX_MTVEC(t0)
- csrr t3, mcause
- sw t3, RV_SLP_CTX_MCAUSE(t0)
- csrr t1, mtval
- sw t1, RV_SLP_CTX_MTVAL(t0)
- csrr t2, mie
- sw t2, RV_SLP_CTX_MIE(t0)
- csrr t3, mip
- sw t3, RV_SLP_CTX_MIP(t0)
- csrr t1, mepc
- sw t1, RV_SLP_CTX_MEPC(t0)
- /*
- !!! Let IDF know the chip is going to sleep !!!
- The RV_SLP_STK_PMUFUNC field identifies whether the chip is going to sleep
- or has just been awakened. The lowest 2 bits carry the indication:
- 3 means just awakened, 1 means going to sleep.
- */
- li t1, ~0x3 /* mask to clear the two indication bits */
- lw t2, RV_SLP_CTX_PMUFUNC(t0)
- and t2, t1, t2
- ori t2, t2, 0x1 /* mark "going to sleep" */
- sw t2, RV_SLP_CTX_PMUFUNC(t0)
- mv t3, t0 /* keep frame pointer in t3; t0 is about to be restored */
- csrr t0, mscratch /* recover the original t0 stashed at entry */
- sw t0, RV_SLP_CTX_T0(t3)
- #if !CONFIG_IDF_TARGET_ESP32C6 && !CONFIG_IDF_TARGET_ESP32H2
- /* writeback dcache is required here!!!
- Program the cache-sync unit (map bit, address 0, size 0), trigger the
- sync, then busy-wait for completion.
- NOTE(review): bit meanings are assumed from the register names and the
- comments below -- confirm against the target's technical reference manual. */
- la t0, EXTMEM_CACHE_SYNC_MAP_REG
- li t1, 0x10
- sw t1, 0x0(t0) /* set EXTMEM_CACHE_SYNC_MAP_REG bit 4 */
- la t2, EXTMEM_CACHE_SYNC_ADDR_REG
- sw zero, 0x0(t2) /* clear EXTMEM_CACHE_SYNC_ADDR_REG */
- la t0, EXTMEM_CACHE_SYNC_SIZE_REG
- sw zero, 0x0(t0) /* clear EXTMEM_CACHE_SYNC_SIZE_REG */
- la t1, EXTMEM_CACHE_SYNC_CTRL_REG
- lw t2, 0x0(t1)
- ori t2, t2, 0x4 /* set the sync trigger bit in CTRL */
- sw t2, 0x0(t1)
- li t0, 0x10 /* SYNC_DONE bit */
- wait_sync_done:
- lw t2, 0x0(t1)
- and t2, t0, t2
- beqz t2, wait_sync_done /* poll CTRL until SYNC_DONE is set */
- #endif
- /* t0..t2 were used as scratch above; reload t0..t3 from the frame.
- t3 must be reloaded last, since it still holds the frame pointer. */
- lw t0, RV_SLP_CTX_T0(t3)
- lw t1, RV_SLP_CTX_T1(t3)
- lw t2, RV_SLP_CTX_T2(t3)
- lw t3, RV_SLP_CTX_T3(t3)
- ret
- .size rv_core_critical_regs_save, . - rv_core_critical_regs_save
- /* Performance-counter CSR aliases.
- NOTE(review): none of these macros are referenced anywhere in this chunk
- of the file -- confirm whether they are used elsewhere before relying on
- or removing them. */
- #define CSR_PCER_U 0x800
- #define CSR_PCMR_U 0x801
- #define PCER_CYCLES (1<<0) /* count clock cycles */
- #define PCMR_GLOBAL_EN (1<<0) /* enable count */
- #define pcer CSR_PCER_U
- #define pcmr CSR_PCMR_U
- /*
- --------------------------------------------------------------------------------
- This assembly subroutine restores the CPU core critical-register context
- that was saved before sleep, once the system wakes up. It also updates the
- PMU control information. Because the save routine stored the frame pointer
- in the frame's A0 slot, a0 again points to the critical-register context
- memory object when this routine returns. After it returns, the caller
- continues restoring the other modules of the system.
-
- If the frame pointer is null, restoration is skipped entirely.
- --------------------------------------------------------------------------------
- */
- .section .iram1,"ax"
- .global rv_core_critical_regs_restore
- .weak rv_core_critical_regs_restore /* weak: allows an override elsewhere */
- .type rv_core_critical_regs_restore,@function
- .global _rv_core_critical_regs_restore
- .type _rv_core_critical_regs_restore,@function
- .align 4
- _rv_core_critical_regs_restore: /* export a strong symbol to jump to here, used
- * for a static callback */
- nop
- rv_core_critical_regs_restore:
- la t0, rv_core_critical_regs_frame
- lw t0, 0(t0) /* t0 = pointer to RvCoreCriticalSleepFrame object */
- beqz t0, .skip_restore /* make sure we do not dereference a null frame */
- /*
- !!! Let IDF know the chip has just been awakened !!!
- The RV_SLP_STK_PMUFUNC field identifies whether the chip is going to sleep
- or has just been awakened. The lowest 2 bits carry the indication:
- 3 means just awakened, 1 means going to sleep.
- */
- lw t1, RV_SLP_CTX_PMUFUNC(t0)
- ori t1, t1, 0x3 /* mark "awakened" */
- sw t1, RV_SLP_CTX_PMUFUNC(t0)
- /* Restore the machine-mode CSRs first, while t1..t3 are still free. */
- lw t2, RV_SLP_CTX_MEPC(t0)
- csrw mepc, t2
- lw t3, RV_SLP_CTX_MIP(t0)
- csrw mip, t3
- lw t1, RV_SLP_CTX_MIE(t0)
- csrw mie, t1
- lw t2, RV_SLP_CTX_MSTATUS(t0)
- csrw mstatus, t2
- lw t3, RV_SLP_CTX_MTVEC(t0)
- csrw mtvec, t3
- lw t1, RV_SLP_CTX_MCAUSE(t0)
- csrw mcause, t1
- lw t2, RV_SLP_CTX_MTVAL(t0)
- csrw mtval, t2
- /* Restore the general-purpose registers in reverse order of the save.
- t0 is restored last, since it holds the frame pointer until then. */
- lw t6, RV_SLP_CTX_T6(t0)
- lw t5, RV_SLP_CTX_T5(t0)
- lw t4, RV_SLP_CTX_T4(t0)
- lw t3, RV_SLP_CTX_T3(t0)
- lw s11, RV_SLP_CTX_S11(t0)
- lw s10, RV_SLP_CTX_S10(t0)
- lw s9, RV_SLP_CTX_S9(t0)
- lw s8, RV_SLP_CTX_S8(t0)
- lw s7, RV_SLP_CTX_S7(t0)
- lw s6, RV_SLP_CTX_S6(t0)
- lw s5, RV_SLP_CTX_S5(t0)
- lw s4, RV_SLP_CTX_S4(t0)
- lw s3, RV_SLP_CTX_S3(t0)
- lw s2, RV_SLP_CTX_S2(t0)
- lw a7, RV_SLP_CTX_A7(t0)
- lw a6, RV_SLP_CTX_A6(t0)
- lw a5, RV_SLP_CTX_A5(t0)
- lw a4, RV_SLP_CTX_A4(t0)
- lw a3, RV_SLP_CTX_A3(t0)
- lw a2, RV_SLP_CTX_A2(t0)
- lw a1, RV_SLP_CTX_A1(t0)
- lw a0, RV_SLP_CTX_A0(t0) /* a0 = saved frame pointer (set by the save routine) */
- lw s1, RV_SLP_CTX_S1(t0)
- lw s0, RV_SLP_CTX_S0(t0)
- lw t2, RV_SLP_CTX_T2(t0)
- lw t1, RV_SLP_CTX_T1(t0)
- lw tp, RV_SLP_CTX_TP(t0)
- lw gp, RV_SLP_CTX_GP(t0)
- lw sp, RV_SLP_CTX_SP(t0)
- lw ra, RV_SLP_CTX_RA(t0)
- lw t0, RV_SLP_CTX_T0(t0)
- .skip_restore:
- ret
- .size rv_core_critical_regs_restore, . - rv_core_critical_regs_restore
|