// mmu_hal.c
  1. /*
  2. * SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <sys/param.h>
  7. #include <stdint.h>
  8. #include <stdbool.h>
  9. #include "sdkconfig.h"
  10. #include "esp_err.h"
  11. #include "esp_attr.h"
  12. #include "hal/assert.h"
  13. #include "hal/mmu_hal.h"
  14. #include "hal/mmu_ll.h"
  15. #include "rom/cache.h"
/**
 * Initialize the MMU HAL: optionally bring up the ROM cache, set the
 * configured MMU page size, and clear every mapping on all MMU units.
 */
void mmu_hal_init(void)
{
#if CONFIG_ESP_ROM_RAM_APP_NEEDS_MMU_INIT
    // Some RAM-app ROMs leave the cache/MMU uninitialized; do it here.
    ROM_Boot_Cache_Init();
#endif
//TODO: IDF-7516
#if CONFIG_IDF_TARGET_ESP32P4
    // NOTE(review): P4-specific L2 cache invalidation — tracked by the TODO above.
    Cache_Invalidate_All(CACHE_MAP_L2_CACHE);
#endif
    // Apply the Kconfig-selected page size to MMU unit 0, then start from a
    // clean slate with no valid entries.
    mmu_ll_set_page_size(0, CONFIG_MMU_PAGE_SIZE);
    mmu_hal_unmap_all();
}
/**
 * Invalidate every entry on every MMU unit the target has.
 *
 * Targets with per-peripheral MMUs (MMU_LL_MMU_PER_TARGET) expose separate
 * flash and PSRAM units; others use unit 0 (and unit 1 on dual-core chips).
 */
void mmu_hal_unmap_all(void)
{
#if MMU_LL_MMU_PER_TARGET
    mmu_ll_unmap_all(MMU_LL_FLASH_MMU_ID);
    mmu_ll_unmap_all(MMU_LL_PSRAM_MMU_ID);
#else
    mmu_ll_unmap_all(0);
#if !CONFIG_FREERTOS_UNICORE
    // Second core has its own MMU unit on these targets.
    mmu_ll_unmap_all(1);
#endif
#endif
}
  40. uint32_t mmu_hal_pages_to_bytes(uint32_t mmu_id, uint32_t page_num)
  41. {
  42. mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
  43. uint32_t shift_code = 0;
  44. switch (page_size) {
  45. case MMU_PAGE_64KB:
  46. shift_code = 16;
  47. break;
  48. case MMU_PAGE_32KB:
  49. shift_code = 15;
  50. break;
  51. case MMU_PAGE_16KB:
  52. shift_code = 14;
  53. break;
  54. default:
  55. HAL_ASSERT(shift_code);
  56. }
  57. return page_num << shift_code;
  58. }
  59. uint32_t mmu_hal_bytes_to_pages(uint32_t mmu_id, uint32_t bytes)
  60. {
  61. mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
  62. uint32_t shift_code = 0;
  63. switch (page_size) {
  64. case MMU_PAGE_64KB:
  65. shift_code = 16;
  66. break;
  67. case MMU_PAGE_32KB:
  68. shift_code = 15;
  69. break;
  70. case MMU_PAGE_16KB:
  71. shift_code = 14;
  72. break;
  73. default:
  74. HAL_ASSERT(shift_code);
  75. }
  76. return bytes >> shift_code;
  77. }
  78. void mmu_hal_map_region(uint32_t mmu_id, mmu_target_t mem_type, uint32_t vaddr, uint32_t paddr, uint32_t len, uint32_t *out_len)
  79. {
  80. uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
  81. HAL_ASSERT(vaddr % page_size_in_bytes == 0);
  82. HAL_ASSERT(paddr % page_size_in_bytes == 0);
  83. HAL_ASSERT(mmu_ll_check_valid_paddr_region(mmu_id, paddr, len));
  84. HAL_ASSERT(mmu_hal_check_valid_ext_vaddr_region(mmu_id, vaddr, len, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION));
  85. uint32_t page_num = (len + page_size_in_bytes - 1) / page_size_in_bytes;
  86. uint32_t entry_id = 0;
  87. uint32_t mmu_val; //This is the physical address in the format that MMU supported
  88. *out_len = mmu_hal_pages_to_bytes(mmu_id, page_num);
  89. mmu_val = mmu_ll_format_paddr(mmu_id, paddr, mem_type);
  90. while (page_num) {
  91. entry_id = mmu_ll_get_entry_id(mmu_id, vaddr);
  92. mmu_ll_write_entry(mmu_id, entry_id, mmu_val, mem_type);
  93. vaddr += page_size_in_bytes;
  94. mmu_val++;
  95. page_num--;
  96. }
  97. }
  98. void mmu_hal_unmap_region(uint32_t mmu_id, uint32_t vaddr, uint32_t len)
  99. {
  100. uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
  101. HAL_ASSERT(vaddr % page_size_in_bytes == 0);
  102. HAL_ASSERT(mmu_hal_check_valid_ext_vaddr_region(mmu_id, vaddr, len, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION));
  103. uint32_t page_num = (len + page_size_in_bytes - 1) / page_size_in_bytes;
  104. uint32_t entry_id = 0;
  105. while (page_num) {
  106. entry_id = mmu_ll_get_entry_id(mmu_id, vaddr);
  107. mmu_ll_set_entry_invalid(mmu_id, entry_id);
  108. vaddr += page_size_in_bytes;
  109. page_num--;
  110. }
  111. }
  112. bool mmu_hal_vaddr_to_paddr(uint32_t mmu_id, uint32_t vaddr, uint32_t *out_paddr, mmu_target_t *out_target)
  113. {
  114. HAL_ASSERT(mmu_hal_check_valid_ext_vaddr_region(mmu_id, vaddr, 1, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION));
  115. uint32_t entry_id = mmu_ll_get_entry_id(mmu_id, vaddr);
  116. if (!mmu_ll_check_entry_valid(mmu_id, entry_id)) {
  117. return false;
  118. }
  119. uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
  120. uint32_t offset = (uint32_t)vaddr % page_size_in_bytes;
  121. *out_target = mmu_ll_get_entry_target(mmu_id, entry_id);
  122. uint32_t paddr_base = mmu_ll_entry_id_to_paddr_base(mmu_id, entry_id);
  123. *out_paddr = paddr_base | offset;
  124. return true;
  125. }
  126. bool mmu_hal_paddr_to_vaddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target, mmu_vaddr_t type, uint32_t *out_vaddr)
  127. {
  128. HAL_ASSERT(mmu_ll_check_valid_paddr_region(mmu_id, paddr, 1));
  129. uint32_t mmu_val = mmu_ll_format_paddr(mmu_id, paddr, target);
  130. int entry_id = mmu_ll_find_entry_id_based_on_map_value(mmu_id, mmu_val, target);
  131. if (entry_id == -1) {
  132. return false;
  133. }
  134. uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
  135. uint32_t offset = paddr % page_size_in_bytes;
  136. uint32_t vaddr_base = mmu_ll_entry_id_to_vaddr_base(mmu_id, entry_id, type);
  137. if (vaddr_base == 0) {
  138. return false;
  139. }
  140. *out_vaddr = vaddr_base | offset;
  141. return true;
  142. }
/**
 * Check whether [vaddr_start, vaddr_start + len) is a valid external-memory
 * virtual address region of the given type on this MMU unit.
 *
 * Thin HAL wrapper over the target-specific LL check.
 */
bool mmu_hal_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
{
    return mmu_ll_check_valid_ext_vaddr_region(mmu_id, vaddr_start, len, type);
}