cache_err_int.c

/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
/*
The cache has an interrupt that can be raised as soon as an access to a cached
region (flash, psram) is made while the cache is disabled. We use that here to
panic the CPU, which from a debugging perspective is better than grabbing bad
data from the bus.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include "esp_err.h"
#include "esp_attr.h"
#include "esp_cpu.h"
#include "esp_intr_alloc.h"
#include "soc/dport_reg.h"
#include "esp_rom_sys.h"
#include "sdkconfig.h"

void esp_cache_err_int_init(void)
{
    uint32_t core_id = esp_cpu_get_core_id();
    ESP_INTR_DISABLE(ETS_MEMACCESS_ERR_INUM);

    // We do not register a handler for the interrupt because it is interrupt
    // level 4, which is not serviceable from C. Instead, xtensa_vectors.S has
    // a call to the panic handler for this interrupt.
    esp_rom_route_intr_matrix(core_id, ETS_CACHE_IA_INTR_SOURCE, ETS_MEMACCESS_ERR_INUM);

    // Enable the invalid cache access interrupt for when the cache is disabled.
    // When the interrupt fires, we cannot determine the CPU on which the
    // invalid cache access occurred. We enable the interrupt to catch invalid
    // access on both CPUs, but the interrupt is connected to the CPU which
    // happens to call this function.
    // For this reason, the panic handler backtrace will not be correct if the
    // interrupt is connected to the PRO CPU and the invalid access happens on
    // the APP CPU.
    if (core_id == PRO_CPU_NUM) {
        DPORT_SET_PERI_REG_MASK(DPORT_CACHE_IA_INT_EN_REG,
                                DPORT_CACHE_IA_INT_PRO_OPPOSITE |
                                DPORT_CACHE_IA_INT_PRO_DRAM1 |
                                DPORT_CACHE_IA_INT_PRO_DROM0 |
                                DPORT_CACHE_IA_INT_PRO_IROM0 |
                                DPORT_CACHE_IA_INT_PRO_IRAM0 |
                                DPORT_CACHE_IA_INT_PRO_IRAM1);
    } else {
        DPORT_SET_PERI_REG_MASK(DPORT_CACHE_IA_INT_EN_REG,
                                DPORT_CACHE_IA_INT_APP_OPPOSITE |
                                DPORT_CACHE_IA_INT_APP_DRAM1 |
                                DPORT_CACHE_IA_INT_APP_DROM0 |
                                DPORT_CACHE_IA_INT_APP_IROM0 |
                                DPORT_CACHE_IA_INT_APP_IRAM0 |
                                DPORT_CACHE_IA_INT_APP_IRAM1);
    }
    ESP_INTR_ENABLE(ETS_MEMACCESS_ERR_INUM);
}
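
/*
 * Illustrative sketch, not part of the original file: each core is expected
 * to call esp_cache_err_int_init() once early during startup, after interrupt
 * routing is available. The example_core_startup() name below is an
 * assumption used only for illustration; the block is compiled out.
 */
#if 0 /* example only */
static void example_core_startup(void)
{
    // Route the cache-error source to the high-level interrupt on this core
    // and enable the DPORT invalid-access bits for it.
    esp_cache_err_int_init();
}
#endif
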
int esp_cache_err_get_cpuid(void)
{
    const uint32_t pro_mask =
        DPORT_PRO_CPU_DISABLED_CACHE_IA_DRAM1 |
        DPORT_PRO_CPU_DISABLED_CACHE_IA_DROM0 |
        DPORT_PRO_CPU_DISABLED_CACHE_IA_IROM0 |
        DPORT_PRO_CPU_DISABLED_CACHE_IA_IRAM0 |
        DPORT_PRO_CPU_DISABLED_CACHE_IA_IRAM1 |
        DPORT_APP_CPU_DISABLED_CACHE_IA_OPPOSITE;

    if (DPORT_GET_PERI_REG_MASK(DPORT_PRO_DCACHE_DBUG3_REG, pro_mask)) {
        return PRO_CPU_NUM;
    }

    const uint32_t app_mask =
        DPORT_APP_CPU_DISABLED_CACHE_IA_DRAM1 |
        DPORT_APP_CPU_DISABLED_CACHE_IA_DROM0 |
        DPORT_APP_CPU_DISABLED_CACHE_IA_IROM0 |
        DPORT_APP_CPU_DISABLED_CACHE_IA_IRAM0 |
        DPORT_PRO_CPU_DISABLED_CACHE_IA_OPPOSITE;

    if (DPORT_GET_PERI_REG_MASK(DPORT_APP_DCACHE_DBUG3_REG, app_mask)) {
        return APP_CPU_NUM;
    }

    return -1;
}
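
/*
 * Usage sketch, not part of the original file: a panic handler could use
 * esp_cache_err_get_cpuid() to report which CPU triggered the invalid cache
 * access (-1 means neither debug register shows a fault). The
 * example_report_cache_err() name is an assumption; the block is compiled out.
 */
#if 0 /* example only */
static void example_report_cache_err(void)
{
    int core = esp_cache_err_get_cpuid();
    if (core >= 0) {
        esp_rom_printf("Invalid cache access raised by CPU%d\n", core);
    } else {
        esp_rom_printf("Invalid cache access: source CPU unknown\n");
    }
}
#endif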