esp_cache.c

/*
 * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <sys/param.h>
#include <inttypes.h>
#include "sdkconfig.h"
#include "esp_check.h"
#include "esp_log.h"
#include "soc/soc_caps.h"
#include "hal/mmu_hal.h"
#include "hal/cache_hal.h"
#include "esp_cache.h"
#include "esp_private/critical_section.h"

static const char *TAG = "cache";
#if SOC_CACHE_WRITEBACK_SUPPORTED
DEFINE_CRIT_SECTION_LOCK_STATIC(s_spinlock);

static void s_cache_freeze(void)
{
#if SOC_CACHE_FREEZE_SUPPORTED
    cache_hal_freeze(CACHE_TYPE_DATA | CACHE_TYPE_INSTRUCTION);
#endif
    /**
     * On chips that support cache writeback but not cache freeze (currently only the S2),
     * the critical section alone is enough to prevent preemption from a non-IRAM ISR,
     * since the chip is single-core.
     */
}

static void s_cache_unfreeze(void)
{
#if SOC_CACHE_FREEZE_SUPPORTED
    cache_hal_unfreeze(CACHE_TYPE_DATA | CACHE_TYPE_INSTRUCTION);
#endif
    /**
     * Similarly, on chips that support writeback but not freeze (currently only the S2),
     * nothing more needs to be done here.
     */
}
#endif  //#if SOC_CACHE_WRITEBACK_SUPPORTED
esp_err_t esp_cache_msync(void *addr, size_t size, int flags)
{
    ESP_RETURN_ON_FALSE_ISR(addr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
    ESP_RETURN_ON_FALSE_ISR(mmu_hal_check_valid_ext_vaddr_region(0, (uint32_t)addr, size, MMU_VADDR_DATA), ESP_ERR_INVALID_ARG, TAG, "invalid address");

#if SOC_CACHE_WRITEBACK_SUPPORTED
    if ((flags & ESP_CACHE_MSYNC_FLAG_UNALIGNED) == 0) {
        esp_os_enter_critical_safe(&s_spinlock);
        uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_TYPE_DATA);
        esp_os_exit_critical_safe(&s_spinlock);

        ESP_RETURN_ON_FALSE_ISR(((uint32_t)addr % data_cache_line_size) == 0, ESP_ERR_INVALID_ARG, TAG, "start address isn't aligned with the data cache line size (%"PRIu32")B", data_cache_line_size);
        ESP_RETURN_ON_FALSE_ISR((size % data_cache_line_size) == 0, ESP_ERR_INVALID_ARG, TAG, "size isn't aligned with the data cache line size (%"PRIu32")B", data_cache_line_size);
        ESP_RETURN_ON_FALSE_ISR((((uint32_t)addr + size) % data_cache_line_size) == 0, ESP_ERR_INVALID_ARG, TAG, "end address isn't aligned with the data cache line size (%"PRIu32")B", data_cache_line_size);
    }

    uint32_t vaddr = (uint32_t)addr;
    esp_os_enter_critical_safe(&s_spinlock);
    s_cache_freeze();

    cache_hal_writeback_addr(vaddr, size);
    if (flags & ESP_CACHE_MSYNC_FLAG_INVALIDATE) {
        cache_hal_invalidate_addr(vaddr, size);
    }

    s_cache_unfreeze();
    esp_os_exit_critical_safe(&s_spinlock);
#endif

    return ESP_OK;
}
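
/*
 * Usage sketch (illustrative, not part of esp_cache.c): how a driver might
 * call esp_cache_msync() on a DMA TX buffer placed in writeback-cached
 * memory. The helper name and its parameters are hypothetical; only
 * esp_cache_msync() and its flags come from this file.
 */
static void example_flush_dma_tx_buffer(void *buf, size_t len)
{
    /* Write dirty cache lines back so the DMA engine sees the latest CPU
     * writes, then invalidate them so a later CPU read re-fetches from memory.
     * `buf` and `len` are assumed to be aligned to the data cache line size;
     * otherwise the alignment checks above reject the call unless
     * ESP_CACHE_MSYNC_FLAG_UNALIGNED is passed. */
    ESP_ERROR_CHECK(esp_cache_msync(buf, len, ESP_CACHE_MSYNC_FLAG_INVALIDATE));
}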