esp_cache.c

/*
 * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <sys/param.h>
#include <inttypes.h>
#include <string.h>
#include <assert.h>   //assert() is used in esp_cache_msync()
#include "sdkconfig.h"
#include "esp_check.h"
#include "esp_log.h"
#include "esp_heap_caps.h"
#include "esp_rom_caps.h"
#include "soc/soc_caps.h"
#include "hal/mmu_hal.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#include "esp_cache.h"
#include "esp_private/esp_cache_private.h"
#include "esp_private/critical_section.h"

static const char *TAG = "cache";

#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))

DEFINE_CRIT_SECTION_LOCK_STATIC(s_spinlock);
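
/**
 * esp_cache_msync(): synchronize a memory region between the CPU cache and its
 * backing memory.
 *
 * C2M (cache-to-memory, the path taken when M2C is not requested) writes the
 * region back from the data cache, and additionally invalidates it when
 * ESP_CACHE_MSYNC_FLAG_INVALIDATE is set. M2C (memory-to-cache) invalidates the
 * region so the next access refetches it from memory. Unless
 * ESP_CACHE_MSYNC_FLAG_UNALIGNED is given, both the start address and the size
 * must be multiples of the cache line size.
 *
 * Illustrative usage (a sketch with made-up buffer names, not part of this file):
 * write back a DMA TX buffer before a peripheral reads it:
 *
 *     ESP_ERROR_CHECK(esp_cache_msync(tx_buf, tx_len, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
 */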
esp_err_t esp_cache_msync(void *addr, size_t size, int flags)
{
    ESP_RETURN_ON_FALSE_ISR(addr, ESP_ERR_INVALID_ARG, TAG, "null pointer");

    uint32_t addr_end = 0;
    bool ovf = __builtin_add_overflow((uint32_t)addr, size, &addr_end);
    ESP_EARLY_LOGV(TAG, "addr_end: 0x%x\n", addr_end);
    ESP_RETURN_ON_FALSE_ISR(!ovf, ESP_ERR_INVALID_ARG, TAG, "wrong size, total size overflow");

    bool both_dir = (flags & ESP_CACHE_MSYNC_FLAG_DIR_C2M) && (flags & ESP_CACHE_MSYNC_FLAG_DIR_M2C);
    bool both_type = (flags & ESP_CACHE_MSYNC_FLAG_TYPE_DATA) && (flags & ESP_CACHE_MSYNC_FLAG_TYPE_INST);
    ESP_RETURN_ON_FALSE_ISR(!both_dir && !both_type, ESP_ERR_INVALID_ARG, TAG, "both C2M and M2C directions, or both data and instruction types are selected, you should only select one direction and one type");

    uint32_t vaddr = (uint32_t)addr;
    bool valid = false;
    uint32_t cache_level = 0;
    uint32_t cache_id = 0;

    valid = cache_hal_vaddr_to_cache_level_id(vaddr, size, &cache_level, &cache_id);
    ESP_RETURN_ON_FALSE_ISR(valid, ESP_ERR_INVALID_ARG, TAG, "invalid addr or null pointer");

    cache_type_t cache_type = CACHE_TYPE_DATA;
    if (flags & ESP_CACHE_MSYNC_FLAG_TYPE_INST) {
        cache_type = CACHE_TYPE_INSTRUCTION;
    }

    uint32_t cache_line_size = cache_hal_get_cache_line_size(cache_level, cache_type);
    if ((flags & ESP_CACHE_MSYNC_FLAG_UNALIGNED) == 0) {
        bool aligned_addr = (((uint32_t)addr % cache_line_size) == 0) && ((size % cache_line_size) == 0);
        ESP_RETURN_ON_FALSE_ISR(aligned_addr, ESP_ERR_INVALID_ARG, TAG, "start address: 0x%x, or the size: 0x%x is (are) not aligned with the cache line size (0x%x)B", (uint32_t)addr, size, cache_line_size);
    }

    if (flags & ESP_CACHE_MSYNC_FLAG_DIR_M2C) {
        ESP_EARLY_LOGV(TAG, "M2C DIR");

        //Invalidate the region so the next CPU access fetches fresh data from memory
        esp_os_enter_critical_safe(&s_spinlock);
        //Add preload feature / flag here, IDF-7800
        valid = cache_hal_invalidate_addr(vaddr, size);
        esp_os_exit_critical_safe(&s_spinlock);
        assert(valid);
    } else {
        ESP_EARLY_LOGV(TAG, "C2M DIR");
        if (flags & ESP_CACHE_MSYNC_FLAG_TYPE_INST) {
            ESP_RETURN_ON_FALSE_ISR(false, ESP_ERR_INVALID_ARG, TAG, "C2M direction doesn't support instruction type");
        }

#if SOC_CACHE_WRITEBACK_SUPPORTED
        //Write dirty cache lines back to memory, optionally invalidating them afterwards
        esp_os_enter_critical_safe(&s_spinlock);
        valid = cache_hal_writeback_addr(vaddr, size);
        esp_os_exit_critical_safe(&s_spinlock);
        assert(valid);

        if (flags & ESP_CACHE_MSYNC_FLAG_INVALIDATE) {
            esp_os_enter_critical_safe(&s_spinlock);
            valid &= cache_hal_invalidate_addr(vaddr, size);
            esp_os_exit_critical_safe(&s_spinlock);
        }
        assert(valid);
#endif
    }

    return ESP_OK;
}
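
/**
 * esp_cache_aligned_malloc(): allocate a heap buffer whose address and size are
 * aligned to the data cache line size (falling back to 4-byte alignment when the
 * selected cache reports a line size of 0). ESP_CACHE_MALLOC_FLAG_PSRAM selects
 * external memory and the external cache level; otherwise internal memory is
 * used, with MALLOC_CAP_DMA added when ESP_CACHE_MALLOC_FLAG_DMA is set. The
 * rounded-up size is reported through actual_size when the caller provides it.
 */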
esp_err_t esp_cache_aligned_malloc(size_t size, uint32_t flags, void **out_ptr, size_t *actual_size)
{
    ESP_RETURN_ON_FALSE_ISR(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");

    uint32_t cache_level = CACHE_LL_LEVEL_INT_MEM;
    uint32_t heap_caps = 0;
    uint32_t data_cache_line_size = 0;
    void *ptr = NULL;

    if (flags & ESP_CACHE_MALLOC_FLAG_PSRAM) {
        cache_level = CACHE_LL_LEVEL_EXT_MEM;
        heap_caps |= MALLOC_CAP_SPIRAM;
    } else {
        heap_caps |= MALLOC_CAP_INTERNAL;
        if (flags & ESP_CACHE_MALLOC_FLAG_DMA) {
            heap_caps |= MALLOC_CAP_DMA;
        }
    }

    data_cache_line_size = cache_hal_get_cache_line_size(cache_level, CACHE_TYPE_DATA);
    if (data_cache_line_size == 0) {
        //default alignment
        data_cache_line_size = 4;
    }

    size = ALIGN_UP_BY(size, data_cache_line_size);
    ptr = heap_caps_aligned_alloc(data_cache_line_size, size, heap_caps);
    ESP_RETURN_ON_FALSE_ISR(ptr, ESP_ERR_NO_MEM, TAG, "not enough heap memory for (%"PRIu32")B alignment", data_cache_line_size);

    *out_ptr = ptr;
    if (actual_size) {
        *actual_size = size;
    }

    return ESP_OK;
}
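
/**
 * esp_cache_aligned_calloc(): zero-initialized variant of esp_cache_aligned_malloc().
 * Checks n * size for overflow, allocates the cache-line-aligned buffer, then
 * clears it with memset().
 */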
esp_err_t esp_cache_aligned_calloc(size_t n, size_t size, uint32_t flags, void **out_ptr, size_t *actual_size)
{
    ESP_RETURN_ON_FALSE_ISR(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");

    esp_err_t ret = ESP_FAIL;
    size_t size_bytes = 0;
    bool ovf = false;

    ovf = __builtin_mul_overflow(n, size, &size_bytes);
    ESP_RETURN_ON_FALSE_ISR(!ovf, ESP_ERR_INVALID_ARG, TAG, "wrong size, total size overflow");

    void *ptr = NULL;
    ret = esp_cache_aligned_malloc(size_bytes, flags, &ptr, actual_size);
    if (ret == ESP_OK) {
        memset(ptr, 0, size_bytes);
        *out_ptr = ptr;
    }

    return ret;
}
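
/**
 * esp_cache_get_alignment(): report the allocation alignment that matches the
 * selected memory type, i.e. the data cache line size of the internal or
 * external (PSRAM) cache, or 4 bytes when that cache reports a line size of 0.
 */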
esp_err_t esp_cache_get_alignment(uint32_t flags, size_t *out_alignment)
{
    ESP_RETURN_ON_FALSE(out_alignment, ESP_ERR_INVALID_ARG, TAG, "null pointer");

    uint32_t cache_level = CACHE_LL_LEVEL_INT_MEM;
    uint32_t data_cache_line_size = 0;

    if (flags & ESP_CACHE_MALLOC_FLAG_PSRAM) {
        cache_level = CACHE_LL_LEVEL_EXT_MEM;
    }

    data_cache_line_size = cache_hal_get_cache_line_size(cache_level, CACHE_TYPE_DATA);
    if (data_cache_line_size == 0) {
        //default alignment
        data_cache_line_size = 4;
    }

    *out_alignment = data_cache_line_size;

    return ESP_OK;
}