// cache_err_int.c
// Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
The cache has an interrupt that can be raised as soon as an access to a cached
region (flash, psram) is done without the cache being enabled. We use that here
to panic the CPU, which from a debugging perspective is better than grabbing bad
data from the bus.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include "freertos/FreeRTOS.h"
#include "esp_err.h"
#include "esp_intr.h"
#include "esp_attr.h"
#include "soc/dport_reg.h"
#include "sdkconfig.h"
#include "esp_dport_access.h"
  31. void esp_cache_err_int_init()
  32. {
  33. uint32_t core_id = xPortGetCoreID();
  34. ESP_INTR_DISABLE(ETS_CACHEERR_INUM);
  35. // We do not register a handler for the interrupt because it is interrupt
  36. // level 4 which is not serviceable from C. Instead, xtensa_vectors.S has
  37. // a call to the panic handler for
  38. // this interrupt.
  39. intr_matrix_set(core_id, ETS_CACHE_IA_INTR_SOURCE, ETS_CACHEERR_INUM);
  40. // Enable invalid cache access interrupt when the cache is disabled.
  41. // When the interrupt happens, we can not determine the CPU where the
  42. // invalid cache access has occurred. We enable the interrupt to catch
  43. // invalid access on both CPUs, but the interrupt is connected to the
  44. // CPU which happens to call this function.
  45. // For this reason, panic handler backtrace will not be correct if the
  46. // interrupt is connected to PRO CPU and invalid access happens on the APP
  47. // CPU.
  48. if (core_id == PRO_CPU_NUM) {
  49. DPORT_SET_PERI_REG_MASK(DPORT_CACHE_IA_INT_EN_REG,
  50. DPORT_CACHE_IA_INT_PRO_OPPOSITE |
  51. DPORT_CACHE_IA_INT_PRO_DRAM1 |
  52. DPORT_CACHE_IA_INT_PRO_DROM0 |
  53. DPORT_CACHE_IA_INT_PRO_IROM0 |
  54. DPORT_CACHE_IA_INT_PRO_IRAM0 |
  55. DPORT_CACHE_IA_INT_PRO_IRAM1);
  56. } else {
  57. DPORT_SET_PERI_REG_MASK(DPORT_CACHE_IA_INT_EN_REG,
  58. DPORT_CACHE_IA_INT_APP_OPPOSITE |
  59. DPORT_CACHE_IA_INT_APP_DRAM1 |
  60. DPORT_CACHE_IA_INT_APP_DROM0 |
  61. DPORT_CACHE_IA_INT_APP_IROM0 |
  62. DPORT_CACHE_IA_INT_APP_IRAM0 |
  63. DPORT_CACHE_IA_INT_APP_IRAM1);
  64. }
  65. ESP_INTR_ENABLE(ETS_CACHEERR_INUM);
  66. }
  67. int IRAM_ATTR esp_cache_err_get_cpuid()
  68. {
  69. const uint32_t pro_mask =
  70. DPORT_PRO_CPU_DISABLED_CACHE_IA_DRAM1 |
  71. DPORT_PRO_CPU_DISABLED_CACHE_IA_DROM0 |
  72. DPORT_PRO_CPU_DISABLED_CACHE_IA_IROM0 |
  73. DPORT_PRO_CPU_DISABLED_CACHE_IA_IRAM0 |
  74. DPORT_PRO_CPU_DISABLED_CACHE_IA_IRAM1 |
  75. DPORT_APP_CPU_DISABLED_CACHE_IA_OPPOSITE;
  76. if (DPORT_GET_PERI_REG_MASK(DPORT_PRO_DCACHE_DBUG3_REG, pro_mask)) {
  77. return PRO_CPU_NUM;
  78. }
  79. const uint32_t app_mask =
  80. DPORT_APP_CPU_DISABLED_CACHE_IA_DRAM1 |
  81. DPORT_APP_CPU_DISABLED_CACHE_IA_DROM0 |
  82. DPORT_APP_CPU_DISABLED_CACHE_IA_IROM0 |
  83. DPORT_APP_CPU_DISABLED_CACHE_IA_IRAM0 |
  84. DPORT_APP_CPU_DISABLED_CACHE_IA_IRAM1 |
  85. DPORT_PRO_CPU_DISABLED_CACHE_IA_OPPOSITE;
  86. if (DPORT_GET_PERI_REG_MASK(DPORT_APP_DCACHE_DBUG3_REG, app_mask)) {
  87. return APP_CPU_NUM;
  88. }
  89. return -1;
  90. }