esp_mmu_map.c

/*
 * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <inttypes.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_heap_caps.h"
#include "soc/soc_caps.h"
#include "hal/cache_types.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#include "hal/mmu_types.h"
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"
#include "esp_private/cache_utils.h"
#include "esp_private/esp_cache_esp32_private.h"
#include "esp_private/esp_mmu_map_private.h"
#include "ext_mem_layout.h"
#include "esp_mmu_map.h"

//This is for size align
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
//This is for vaddr align
#define ALIGN_DOWN_BY(num, align) ((num) & (~((align) - 1)))
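
/**
 * Worked example (illustrative, assuming a 64 KB MMU page, i.e. CONFIG_MMU_PAGE_SIZE == 0x10000):
 *
 *   ALIGN_UP_BY(0x12345, 0x10000)   == 0x20000   //size rounded up to the next page boundary
 *   ALIGN_DOWN_BY(0x12345, 0x10000) == 0x10000   //address rounded down to the enclosing page boundary
 *
 * Both macros require `align` to be a power of two, as they work via bit masking.
 */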

//This flag indicates the memory region has been merged into another one; we no longer care about it
#define MEM_REGION_MERGED -1

/**
 * We have some hardware-related tests for vaddr region capabilities.
 * Use this macro to disable the paddr check, as those tests need to reuse certain paddr blocks.
 */
#define ENABLE_PADDR_CHECK !ESP_MMAP_TEST_ALLOW_MAP_TO_MAPPED_PADDR

static DRAM_ATTR const char *TAG = "mmap";

/**
 * @brief MMU Memory Mapping Driver
 *
 * Driver Backgrounds:
 *
 * --------------------------------------------------------------------------------------------------------
 * |                                          Memory Pool                                                  |
 * --------------------------------------------------------------------------------------------------------
 * |                     Memory Region 0                      |      Memory Region 1      |      ...       |
 * --------------------------------------------------------------------------------------------------------
 * | Block 0 | Slot 0 | Block 1 | Block 2 | ... | Slot 1 (final slot) |              ...                   |
 * --------------------------------------------------------------------------------------------------------
 *
 * - A block is a piece of vaddr range that is dynamically mapped. Blocks are doubly linked:
 *   Block 0 <-> Block 1 <-> Block 2
 * - A Slot is the vaddr range between 2 blocks.
 */

/**
 * Struct for a block
 */
typedef struct mem_block_ {
    uint32_t laddr_start;   //linear address start of this block
    uint32_t laddr_end;     //linear address end of this block
    intptr_t vaddr_start;   //virtual address start of this block
    intptr_t vaddr_end;     //virtual address end of this block
    size_t size;            //size of this block, should be aligned to MMU page size
    int caps;               //caps of this block, `mmu_mem_caps_t`
    uint32_t paddr_start;   //physical address start of this block
    uint32_t paddr_end;     //physical address end of this block
    mmu_target_t target;    //physical target that this block is mapped to
    TAILQ_ENTRY(mem_block_) entries;    //link entry
} mem_block_t;

/**
 * Struct for a memory region
 */
typedef struct mem_region_ {
    cache_bus_mask_t bus_id;    //cache bus mask of this region
    uint32_t start;             //linear address start of this region
    uint32_t end;               //linear address end of this region
    size_t region_size;         //region size, in bytes
    uint32_t free_head;         //linear address free head of this region
    size_t max_slot_size;       //max slot size within this region
    int caps;                   //caps of this region, `mmu_mem_caps_t`
    mmu_target_t targets;       //physical targets supported by this region
    TAILQ_HEAD(mem_block_head_, mem_block_) mem_block_head;    //link head of allocated blocks within this region
} mem_region_t;

typedef struct {
    /**
     * Number of memory regions that are available. After coalescing, this number should be
     * smaller than or equal to `SOC_MMU_LINEAR_ADDRESS_REGION_NUM`.
     */
    uint32_t num_regions;
    /**
     * This saves the available MMU linear address regions,
     * after reserving flash .rodata and .text, and after coalescing.
     * Only the first `num_regions` items are valid.
     */
    mem_region_t mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM];
} mmu_ctx_t;

static mmu_ctx_t s_mmu_ctx;

#if ENABLE_PADDR_CHECK
static bool s_is_enclosed(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size);
static bool s_is_overlapped(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size);
#endif  //#if ENABLE_PADDR_CHECK

#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
static cache_bus_mask_t s_get_bus_mask(uint32_t vaddr_start, uint32_t len)
{
#if CACHE_LL_EXT_MEM_VIA_L2CACHE
    return cache_ll_l2_get_bus(0, vaddr_start, len);
#else
    return cache_ll_l1_get_bus(0, vaddr_start, len);
#endif
}

static void s_reserve_irom_region(mem_region_t *hw_mem_regions, int region_nums)
{
    /**
     * We follow the way the 1st bootloader loads flash .text:
     *
     * - IBUS addresses (between `_instruction_reserved_start` and `_instruction_reserved_end`) are consecutive on all chips;
     *   we strongly rely on this to calculate the .text length.
     */
    extern int _instruction_reserved_start;
    extern int _instruction_reserved_end;
    size_t irom_len_to_reserve = (uint32_t)&_instruction_reserved_end - (uint32_t)&_instruction_reserved_start;
    assert((mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_start)) == irom_len_to_reserve);

    irom_len_to_reserve += (uint32_t)&_instruction_reserved_start - ALIGN_DOWN_BY((uint32_t)&_instruction_reserved_start, CONFIG_MMU_PAGE_SIZE);
    irom_len_to_reserve = ALIGN_UP_BY(irom_len_to_reserve, CONFIG_MMU_PAGE_SIZE);
    cache_bus_mask_t bus_mask = s_get_bus_mask((uint32_t)&_instruction_reserved_start, irom_len_to_reserve);

    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        if (bus_mask & hw_mem_regions[i].bus_id) {
            if (hw_mem_regions[i].region_size <= irom_len_to_reserve) {
                hw_mem_regions[i].free_head = hw_mem_regions[i].end;
                hw_mem_regions[i].max_slot_size = 0;
                irom_len_to_reserve -= hw_mem_regions[i].region_size;
            } else {
                hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + irom_len_to_reserve;
                hw_mem_regions[i].max_slot_size -= irom_len_to_reserve;
            }
        }
    }
}
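
/**
 * Illustrative example of the reservation math above (hypothetical numbers, assuming a 64 KB MMU page):
 *
 *   _instruction_reserved_start = 0x42000020, _instruction_reserved_end = 0x42030020
 *   raw .text length            = 0x30000
 *   + in-page offset of start   = 0x42000020 - ALIGN_DOWN_BY(0x42000020, 0x10000) = 0x20
 *   rounded up to page size     = ALIGN_UP_BY(0x30020, 0x10000) = 0x40000
 *
 * i.e. 4 MMU pages get reserved, starting from the page that contains `_instruction_reserved_start`.
 */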

static void s_reserve_drom_region(mem_region_t *hw_mem_regions, int region_nums)
{
    /**
     * Similarly, we follow the way the 1st bootloader loads flash .rodata:
     */
    extern int _rodata_reserved_start;
    extern int _rodata_reserved_end;
    size_t drom_len_to_reserve = (uint32_t)&_rodata_reserved_end - (uint32_t)&_rodata_reserved_start;
    assert((mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_start)) == drom_len_to_reserve);

    drom_len_to_reserve += (uint32_t)&_rodata_reserved_start - ALIGN_DOWN_BY((uint32_t)&_rodata_reserved_start, CONFIG_MMU_PAGE_SIZE);
    drom_len_to_reserve = ALIGN_UP_BY(drom_len_to_reserve, CONFIG_MMU_PAGE_SIZE);
    cache_bus_mask_t bus_mask = s_get_bus_mask((uint32_t)&_rodata_reserved_start, drom_len_to_reserve);

    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        if (bus_mask & hw_mem_regions[i].bus_id) {
            if (hw_mem_regions[i].region_size <= drom_len_to_reserve) {
                hw_mem_regions[i].free_head = hw_mem_regions[i].end;
                hw_mem_regions[i].max_slot_size = 0;
                drom_len_to_reserve -= hw_mem_regions[i].region_size;
            } else {
                hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + drom_len_to_reserve;
                hw_mem_regions[i].max_slot_size -= drom_len_to_reserve;
            }
        }
    }
}
#endif  //#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS

void esp_mmu_map_init(void)
{
    mem_region_t hw_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {};

    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        hw_mem_regions[i].start = g_mmu_mem_regions[i].start;
        hw_mem_regions[i].end = g_mmu_mem_regions[i].end;
        hw_mem_regions[i].region_size = g_mmu_mem_regions[i].size;
        hw_mem_regions[i].max_slot_size = g_mmu_mem_regions[i].size;
        hw_mem_regions[i].free_head = g_mmu_mem_regions[i].start;
        hw_mem_regions[i].bus_id = g_mmu_mem_regions[i].bus_id;
        hw_mem_regions[i].caps = g_mmu_mem_regions[i].caps;
        hw_mem_regions[i].targets = g_mmu_mem_regions[i].targets;
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
        assert(__builtin_popcount(hw_mem_regions[i].bus_id) == 1);
#endif
        assert(hw_mem_regions[i].region_size % CONFIG_MMU_PAGE_SIZE == 0);
    }

#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
    //First reserve the memory regions used for irom and drom, as we must follow the way the 1st bootloader loads them
    s_reserve_irom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
    s_reserve_drom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
#endif  //#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS

    if (SOC_MMU_LINEAR_ADDRESS_REGION_NUM > 1) {
        //Now we can coalesce adjacent regions
        for (int i = 1; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
            mem_region_t *a = &hw_mem_regions[i - 1];
            mem_region_t *b = &hw_mem_regions[i];
            if ((b->free_head == a->end) && (b->caps == a->caps) && (b->targets == a->targets)) {
                a->caps = MEM_REGION_MERGED;
                b->bus_id |= a->bus_id;
                b->start = a->start;
                b->region_size += a->region_size;
                b->free_head = a->free_head;
                b->max_slot_size += a->max_slot_size;
            }
        }
    }

    //Count the mem regions left after coalescing
    uint32_t region_num = 0;
    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        if (hw_mem_regions[i].caps != MEM_REGION_MERGED) {
            region_num++;
        }
    }
    ESP_EARLY_LOGV(TAG, "after coalescing, %d regions are left", region_num);

    //Now that all static allocations are done, initialise `s_mmu_ctx.mem_regions[]` with the available virtual memory regions
    uint32_t available_region_idx = 0;
    s_mmu_ctx.num_regions = region_num;
    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        if (hw_mem_regions[i].caps == MEM_REGION_MERGED) {
            continue;
        }
        memcpy(&s_mmu_ctx.mem_regions[available_region_idx], &hw_mem_regions[i], sizeof(mem_region_t));
        available_region_idx++;
    }

    for (int i = 0; i < available_region_idx; i++) {
        TAILQ_INIT(&s_mmu_ctx.mem_regions[i].mem_block_head);
    }
    assert(available_region_idx == region_num);
}

static esp_err_t s_mem_caps_check(mmu_mem_caps_t caps)
{
    if (caps & MMU_MEM_CAP_EXEC) {
        if ((caps & MMU_MEM_CAP_8BIT) || (caps & MMU_MEM_CAP_WRITE)) {
            //Executable memory is not expected to be 8-bit accessible or writable.
            return ESP_ERR_INVALID_ARG;
        }
        caps |= MMU_MEM_CAP_32BIT;
    }
    return ESP_OK;
}
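
/**
 * Examples of the check above (illustrative caps combinations):
 *
 *   MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT   -> ESP_OK
 *   MMU_MEM_CAP_EXEC | MMU_MEM_CAP_32BIT  -> ESP_OK
 *   MMU_MEM_CAP_EXEC | MMU_MEM_CAP_WRITE  -> ESP_ERR_INVALID_ARG (executable memory can't be writable)
 *   MMU_MEM_CAP_EXEC | MMU_MEM_CAP_8BIT   -> ESP_ERR_INVALID_ARG (executable memory isn't 8-bit accessible)
 */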

esp_err_t esp_mmu_map_get_max_consecutive_free_block_size(mmu_mem_caps_t caps, mmu_target_t target, size_t *out_len)
{
    ESP_RETURN_ON_FALSE(out_len, ESP_ERR_INVALID_ARG, TAG, "null pointer");
    ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");
    *out_len = 0;

    size_t max = 0;
    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        if (((s_mmu_ctx.mem_regions[i].caps & caps) == caps) && ((s_mmu_ctx.mem_regions[i].targets & target) == target)) {
            if (s_mmu_ctx.mem_regions[i].max_slot_size > max) {
                max = s_mmu_ctx.mem_regions[i].max_slot_size;
            }
        }
    }
    *out_len = max;

    return ESP_OK;
}
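
/**
 * Usage sketch (illustrative): query the largest mappable range before attempting a map.
 *
 * @code{c}
 *     size_t max_size = 0;
 *     ESP_ERROR_CHECK(esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT,
 *                                                                     MMU_TARGET_FLASH0, &max_size));
 *     //`max_size` is the biggest consecutive free vaddr slot matching these caps/target;
 *     //an esp_mmu_map() request larger than this will fail with ESP_ERR_NOT_FOUND.
 * @endcode
 */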

static int32_t s_find_available_region(mem_region_t *mem_regions, uint32_t region_nums, size_t size, mmu_mem_caps_t caps, mmu_target_t target)
{
    int32_t found_region_id = -1;
    for (int i = 0; i < region_nums; i++) {
        if (((mem_regions[i].caps & caps) == caps) && ((mem_regions[i].targets & target) == target)) {
            if (mem_regions[i].max_slot_size >= size) {
                found_region_id = i;
                break;
            }
        }
    }
    return found_region_id;
}

esp_err_t esp_mmu_map_reserve_block_with_caps(size_t size, mmu_mem_caps_t caps, mmu_target_t target, const void **out_ptr)
{
    ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
    ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");

    size_t aligned_size = ALIGN_UP_BY(size, CONFIG_MMU_PAGE_SIZE);
    uint32_t laddr = 0;

    int32_t found_region_id = s_find_available_region(s_mmu_ctx.mem_regions, s_mmu_ctx.num_regions, aligned_size, caps, target);
    if (found_region_id == -1) {
        ESP_EARLY_LOGE(TAG, "no such vaddr range");
        return ESP_ERR_NOT_FOUND;
    }

    laddr = (uint32_t)s_mmu_ctx.mem_regions[found_region_id].free_head;
    s_mmu_ctx.mem_regions[found_region_id].free_head += aligned_size;
    s_mmu_ctx.mem_regions[found_region_id].max_slot_size -= aligned_size;
    ESP_EARLY_LOGV(TAG, "found laddr is 0x%x", laddr);

    uint32_t vaddr = 0;
    if (caps & MMU_MEM_CAP_EXEC) {
        vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION, target);
    } else {
        vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_DATA, target);
    }
    *out_ptr = (void *)vaddr;

    return ESP_OK;
}

IRAM_ATTR esp_err_t esp_mmu_paddr_find_caps(const esp_paddr_t paddr, mmu_mem_caps_t *out_caps)
{
    mem_region_t *region = NULL;
    mem_block_t *mem_block = NULL;
    bool found = false;
    mem_block_t *found_block = NULL;
    if (out_caps == NULL) {
        return ESP_ERR_INVALID_ARG;
    }

    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        region = &s_mmu_ctx.mem_regions[i];
        TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
            if (mem_block == TAILQ_FIRST(&region->mem_block_head) || mem_block == TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
                //Skip the dummy_head and the dummy_tail
                continue;
            }

            //Now we are only traversing the actual dynamically allocated blocks; dummy_head and dummy_tail are already excluded
            if (mem_block->paddr_start == paddr) {
                found = true;
                found_block = mem_block;
                break;
            }
        }
    }

    if (!found) {
        return ESP_ERR_NOT_FOUND;
    }

    *out_caps = found_block->caps;
    return ESP_OK;
}
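
/**
 * Usage sketch (illustrative): look up the caps a mapped physical block was mapped with.
 * Note that `paddr` must be the exact `paddr_start` that was passed to esp_mmu_map() earlier.
 *
 * @code{c}
 *     mmu_mem_caps_t caps = 0;
 *     esp_err_t err = esp_mmu_paddr_find_caps(paddr, &caps);
 *     if (err == ESP_ERR_NOT_FOUND) {
 *         //this paddr block hasn't been mapped via esp_mmu_map()
 *     }
 * @endcode
 */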

static void IRAM_ATTR NOINLINE_ATTR s_do_cache_invalidate(uint32_t vaddr_start, uint32_t size)
{
#if CONFIG_IDF_TARGET_ESP32
    /**
     * On ESP32, due to a hardware limitation, we don't have an easy way to sync
     * the cache with external memory for a certain address range only, so we do a full sync here.
     */
    cache_sync();
#else   //Other chips
    cache_hal_invalidate_addr(vaddr_start, size);
#endif  // CONFIG_IDF_TARGET_ESP32
}

#if MMU_LL_MMU_PER_TARGET
FORCE_INLINE_ATTR uint32_t s_mapping_operation(mmu_target_t target, uint32_t vaddr_start, esp_paddr_t paddr_start, uint32_t size)
{
    uint32_t actual_mapped_len = 0;
    uint32_t mmu_id = 0;
    if (target == MMU_TARGET_FLASH0) {
        mmu_id = MMU_LL_FLASH_MMU_ID;
    } else {
        mmu_id = MMU_LL_PSRAM_MMU_ID;
    }
    mmu_hal_map_region(mmu_id, target, vaddr_start, paddr_start, size, &actual_mapped_len);

    return actual_mapped_len;
}
#else
FORCE_INLINE_ATTR uint32_t s_mapping_operation(mmu_target_t target, uint32_t vaddr_start, esp_paddr_t paddr_start, uint32_t size)
{
    uint32_t actual_mapped_len = 0;
    mmu_hal_map_region(0, target, vaddr_start, paddr_start, size, &actual_mapped_len);
#if (SOC_MMU_PERIPH_NUM == 2)
#if !CONFIG_FREERTOS_UNICORE
    mmu_hal_map_region(1, target, vaddr_start, paddr_start, size, &actual_mapped_len);
#endif  // #if !CONFIG_FREERTOS_UNICORE
#endif  // #if (SOC_MMU_PERIPH_NUM == 2)

    return actual_mapped_len;
}
#endif

static void IRAM_ATTR NOINLINE_ATTR s_do_mapping(mmu_target_t target, uint32_t vaddr_start, esp_paddr_t paddr_start, uint32_t size)
{
    /**
     * Disable the cache. After this call, the involved code and data must be placed in internal RAM.
     *
     * @note We call this for now, but it will be refactored to move out of `spi_flash`.
     */
    spi_flash_disable_interrupts_caches_and_other_cpu();

    uint32_t actual_mapped_len = s_mapping_operation(target, vaddr_start, paddr_start, size);

    cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, vaddr_start, size);
    cache_ll_l1_enable_bus(0, bus_mask);
#if !CONFIG_FREERTOS_UNICORE
    bus_mask = cache_ll_l1_get_bus(0, vaddr_start, size);
    cache_ll_l1_enable_bus(1, bus_mask);
#endif

    s_do_cache_invalidate(vaddr_start, size);

    //Enable the cache. After this call, internal RAM access is no longer mandatory.
    spi_flash_enable_interrupts_caches_and_other_cpu();

    ESP_EARLY_LOGV(TAG, "actual_mapped_len is 0x%"PRIx32, actual_mapped_len);
}

esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target, mmu_mem_caps_t caps, int flags, void **out_ptr)
{
    esp_err_t ret = ESP_FAIL;
    ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
#if !SOC_SPIRAM_SUPPORTED || CONFIG_IDF_TARGET_ESP32
    ESP_RETURN_ON_FALSE(!(target & MMU_TARGET_PSRAM0), ESP_ERR_NOT_SUPPORTED, TAG, "PSRAM is not supported");
#endif
    ESP_RETURN_ON_FALSE((paddr_start % CONFIG_MMU_PAGE_SIZE == 0), ESP_ERR_INVALID_ARG, TAG, "paddr must be rounded up to the nearest multiple of CONFIG_MMU_PAGE_SIZE");
    ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");

    size_t aligned_size = ALIGN_UP_BY(size, CONFIG_MMU_PAGE_SIZE);
    int32_t found_region_id = s_find_available_region(s_mmu_ctx.mem_regions, s_mmu_ctx.num_regions, aligned_size, caps, target);
    if (found_region_id == -1) {
        ESP_EARLY_LOGE(TAG, "no such vaddr range");
        return ESP_ERR_NOT_FOUND;
    }

    //Now we're sure we can find an available block inside a certain region
    mem_region_t *found_region = &s_mmu_ctx.mem_regions[found_region_id];
    mem_block_t *dummy_head = NULL;
    mem_block_t *dummy_tail = NULL;
    mem_block_t *new_block = NULL;

    if (TAILQ_EMPTY(&found_region->mem_block_head)) {
        dummy_head = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        ESP_GOTO_ON_FALSE(dummy_head, ESP_ERR_NO_MEM, err, TAG, "no mem");

        dummy_head->laddr_start = found_region->free_head;
        dummy_head->laddr_end = found_region->free_head;
        //We don't care about the vaddr or paddr of the dummy head
        dummy_head->size = 0;
        dummy_head->caps = caps;
        TAILQ_INSERT_HEAD(&found_region->mem_block_head, dummy_head, entries);

        dummy_tail = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        ESP_GOTO_ON_FALSE(dummy_tail, ESP_ERR_NO_MEM, err, TAG, "no mem");

        dummy_tail->laddr_start = found_region->end;
        dummy_tail->laddr_end = found_region->end;
        //We don't care about the vaddr or paddr of the dummy tail
        dummy_tail->size = 0;
        dummy_tail->caps = caps;
        TAILQ_INSERT_TAIL(&found_region->mem_block_head, dummy_tail, entries);
    }

    //Check if paddr is overlapped
    mem_block_t *mem_block = NULL;

#if ENABLE_PADDR_CHECK
    bool is_enclosed = false;
    bool is_overlapped = false;
    bool allow_overlap = flags & ESP_MMU_MMAP_FLAG_PADDR_SHARED;

    TAILQ_FOREACH(mem_block, &found_region->mem_block_head, entries) {
        if (target == mem_block->target) {
            if (s_is_enclosed(mem_block->paddr_start, mem_block->paddr_end, paddr_start, aligned_size)) {
                //The to-be-mapped paddr block is mapped already
                is_enclosed = true;
                break;
            }

            if (!allow_overlap && s_is_overlapped(mem_block->paddr_start, mem_block->paddr_end, paddr_start, aligned_size)) {
                is_overlapped = true;
                break;
            }
        }
    }

    if (is_enclosed) {
        ESP_LOGW(TAG, "paddr block is mapped already, vaddr_start: %p, size: 0x%x", (void *)mem_block->vaddr_start, mem_block->size);
        *out_ptr = (void *)mem_block->vaddr_start;
        return ESP_ERR_INVALID_STATE;
    }

    if (!allow_overlap && is_overlapped) {
        ESP_LOGE(TAG, "paddr block is overlapped with an already mapped paddr block");
        return ESP_ERR_INVALID_ARG;
    }
#endif  //#if ENABLE_PADDR_CHECK

    new_block = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    ESP_GOTO_ON_FALSE(new_block, ESP_ERR_NO_MEM, err, TAG, "no mem");

    //Reserve this block as it'll be mapped
    bool found = false;
    //Get the end address of the dummy_head block, which is always the first block on the list
    uint32_t last_end = TAILQ_FIRST(&found_region->mem_block_head)->laddr_end;
    size_t slot_len = 0;
    size_t max_slot_len = 0;
    //This stands for the block we found, whose slot between itself and its prior block is where the new block will be inserted
    mem_block_t *found_block = NULL;

    TAILQ_FOREACH(mem_block, &found_region->mem_block_head, entries) {
        slot_len = mem_block->laddr_start - last_end;

        if (!found) {
            if (slot_len >= aligned_size) {
                //Found it
                found = true;
                found_block = mem_block;
                slot_len -= aligned_size;
                new_block->laddr_start = last_end;
            }
        }

        max_slot_len = (slot_len > max_slot_len) ? slot_len : max_slot_len;
        last_end = mem_block->laddr_end;
    }
    assert(found);

    //Insert the to-be-mapped new block into the list
    TAILQ_INSERT_BEFORE(found_block, new_block, entries);

    //Finally, we update the max_slot_size
    found_region->max_slot_size = max_slot_len;

    //Now we fill in the rest according to the found `new_block->laddr_start`
    new_block->laddr_end = new_block->laddr_start + aligned_size;
    new_block->size = aligned_size;
    new_block->caps = caps;
    new_block->paddr_start = paddr_start;
    new_block->paddr_end = paddr_start + aligned_size;
    new_block->target = target;
    if (caps & MMU_MEM_CAP_EXEC) {
        new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_INSTRUCTION, target);
        new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_INSTRUCTION, target);
    } else {
        new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_DATA, target);
        new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_DATA, target);
    }

    //Do the mapping
    s_do_mapping(target, new_block->vaddr_start, paddr_start, aligned_size);
    *out_ptr = (void *)new_block->vaddr_start;

    return ESP_OK;

err:
    //If the dummies were allocated, they were already inserted into the list, so take them off it before freeing them
    if (dummy_tail) {
        TAILQ_REMOVE(&found_region->mem_block_head, dummy_tail, entries);
        free(dummy_tail);
    }
    if (dummy_head) {
        TAILQ_REMOVE(&found_region->mem_block_head, dummy_head, entries);
        free(dummy_head);
    }

    return ret;
}
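
/**
 * Usage sketch (illustrative; the 2 MB physical offset, size and caps are hypothetical):
 * map a paddr block on flash into the data vaddr space, read from it, then unmap it.
 *
 * @code{c}
 *     void *ptr = NULL;
 *     //paddr must be aligned to CONFIG_MMU_PAGE_SIZE; size gets rounded up to it
 *     ESP_ERROR_CHECK(esp_mmu_map(0x200000, 0x10000, MMU_TARGET_FLASH0,
 *                                 MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT, 0, &ptr));
 *
 *     //`ptr` is now a cache-backed virtual address for the physical range
 *     uint8_t first_byte = ((const uint8_t *)ptr)[0];
 *     (void)first_byte;
 *
 *     ESP_ERROR_CHECK(esp_mmu_unmap(ptr));
 * @endcode
 *
 * Passing ESP_MMU_MMAP_FLAG_PADDR_SHARED as `flags` allows the paddr range to overlap with an
 * already mapped block (see the ENABLE_PADDR_CHECK logic above).
 */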

#if MMU_LL_MMU_PER_TARGET
FORCE_INLINE_ATTR void s_unmapping_operation(uint32_t vaddr_start, uint32_t size)
{
    uint32_t mmu_id = 0;
    mmu_target_t target = mmu_ll_vaddr_to_target(vaddr_start);
    if (target == MMU_TARGET_FLASH0) {
        mmu_id = MMU_LL_FLASH_MMU_ID;
    } else {
        mmu_id = MMU_LL_PSRAM_MMU_ID;
    }
    mmu_hal_unmap_region(mmu_id, vaddr_start, size);
}
#else
FORCE_INLINE_ATTR void s_unmapping_operation(uint32_t vaddr_start, uint32_t size)
{
    mmu_hal_unmap_region(0, vaddr_start, size);
#if (SOC_MMU_PERIPH_NUM == 2)
#if !CONFIG_FREERTOS_UNICORE
    mmu_hal_unmap_region(1, vaddr_start, size);
#endif  // #if !CONFIG_FREERTOS_UNICORE
#endif  // #if (SOC_MMU_PERIPH_NUM == 2)
}
#endif

static void IRAM_ATTR NOINLINE_ATTR s_do_unmapping(uint32_t vaddr_start, uint32_t size)
{
    /**
     * Disable the cache. After this call, the involved code and data must be placed in internal RAM.
     *
     * @note We call this for now, but it will be refactored to move out of `spi_flash`.
     */
    spi_flash_disable_interrupts_caches_and_other_cpu();

    s_unmapping_operation(vaddr_start, size);

    //Enable the cache. After this call, internal RAM access is no longer mandatory.
    spi_flash_enable_interrupts_caches_and_other_cpu();
}

esp_err_t esp_mmu_unmap(void *ptr)
{
    ESP_RETURN_ON_FALSE(ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");

    mem_region_t *region = NULL;
    mem_block_t *mem_block = NULL;
    uint32_t ptr_laddr = mmu_ll_vaddr_to_laddr((uint32_t)ptr);
    size_t slot_len = 0;

    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        if (ptr_laddr >= s_mmu_ctx.mem_regions[i].free_head && ptr_laddr < s_mmu_ctx.mem_regions[i].end) {
            region = &s_mmu_ctx.mem_regions[i];
        }
    }
    ESP_RETURN_ON_FALSE(region, ESP_ERR_NOT_FOUND, TAG, "munmap target pointer is outside external memory regions");

    bool found = false;
    mem_block_t *found_block = NULL;
    TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
        if (mem_block == TAILQ_FIRST(&region->mem_block_head) || mem_block == TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
            //Skip the dummy_head and the dummy_tail
            continue;
        }

        //Now we are only traversing the actual dynamically allocated blocks; dummy_head and dummy_tail are already excluded
        if (mem_block->laddr_start == ptr_laddr) {
            slot_len = TAILQ_NEXT(mem_block, entries)->laddr_start - TAILQ_PREV(mem_block, mem_block_head_, entries)->laddr_end;
            region->max_slot_size = (slot_len > region->max_slot_size) ? slot_len : region->max_slot_size;

            found = true;
            found_block = mem_block;
            break;
        }
    }

    ESP_RETURN_ON_FALSE(found, ESP_ERR_NOT_FOUND, TAG, "munmap target pointer isn't mapped yet");

    //Do the unmapping
    s_do_unmapping(mem_block->vaddr_start, mem_block->size);
    //Remove the already unmapped block from the list
    TAILQ_REMOVE(&region->mem_block_head, found_block, entries);
    free(found_block);

    return ESP_OK;
}

esp_err_t esp_mmu_map_dump_mapped_blocks(FILE* stream)
{
    char line[100];
    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        fprintf(stream, "region %d:\n", i);
        fprintf(stream, "%-15s %-14s %-14s %-12s %-12s %-12s\n", "Bus ID", "Start", "Free Head", "End", "Caps", "Max Slot Size");

        char *buf = line;
        size_t len = sizeof(line);
        memset(line, 0x0, len);
        snprintf(buf, len, "0x%-13x 0x%-12"PRIx32" 0x%-11"PRIx32" 0x%-10"PRIx32" 0x%-10x 0x%-8x\n",
                 s_mmu_ctx.mem_regions[i].bus_id,
                 s_mmu_ctx.mem_regions[i].start,
                 s_mmu_ctx.mem_regions[i].free_head,
                 s_mmu_ctx.mem_regions[i].end,
                 s_mmu_ctx.mem_regions[i].caps,
                 s_mmu_ctx.mem_regions[i].max_slot_size);
        fputs(line, stream);

        fprintf(stream, "mapped blocks:\n");
        fprintf(stream, "%-4s %-13s %-12s %-12s %-6s %-13s %-11s\n", "ID", "Vaddr Start", "Vaddr End", "Block Size", "Caps", "Paddr Start", "Paddr End");
        mem_region_t *region = &s_mmu_ctx.mem_regions[i];
        mem_block_t *mem_block = NULL;
        int id = 0;
        TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
            if (mem_block != TAILQ_FIRST(&region->mem_block_head) && mem_block != TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
                snprintf(buf, len, "%-4d 0x%-11x 0x%-10x 0x%-10x 0x%-4x 0x%-11"PRIx32" 0x%-8"PRIx32"\n",
                         id,
                         mem_block->vaddr_start,
                         mem_block->vaddr_end,
                         mem_block->size,
                         mem_block->caps,
                         mem_block->paddr_start,
                         mem_block->paddr_end);
                fputs(line, stream);
                id++;
            }
        }
        fprintf(stream, "\n");
    }

    return ESP_OK;
}
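
/**
 * Usage sketch (illustrative): dump the current regions and mapped blocks to the console.
 *
 * @code{c}
 *     esp_mmu_map_dump_mapped_blocks(stdout);
 * @endcode
 */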

/*---------------------------------------------------------------
        Private dump functions, IRAM Safe
---------------------------------------------------------------*/
esp_err_t IRAM_ATTR esp_mmu_map_dump_mapped_blocks_private(void)
{
    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        mem_region_t *region = &s_mmu_ctx.mem_regions[i];
        mem_block_t *mem_block = NULL;
        TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
            if (mem_block != TAILQ_FIRST(&region->mem_block_head) && mem_block != TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
                ESP_DRAM_LOGI(TAG, "block vaddr_start: 0x%x", mem_block->vaddr_start);
                ESP_DRAM_LOGI(TAG, "block vaddr_end: 0x%x", mem_block->vaddr_end);
                ESP_DRAM_LOGI(TAG, "block size: 0x%x", mem_block->size);
                ESP_DRAM_LOGI(TAG, "block caps: 0x%x", mem_block->caps);
                ESP_DRAM_LOGI(TAG, "block paddr_start: 0x%x", mem_block->paddr_start);
                ESP_DRAM_LOGI(TAG, "block paddr_end: 0x%x", mem_block->paddr_end);
            }
        }
        ESP_DRAM_LOGI(TAG, "region bus_id: 0x%x", s_mmu_ctx.mem_regions[i].bus_id);
        ESP_DRAM_LOGI(TAG, "region start: 0x%x", s_mmu_ctx.mem_regions[i].start);
        ESP_DRAM_LOGI(TAG, "region end: 0x%x", s_mmu_ctx.mem_regions[i].end);
        ESP_DRAM_LOGI(TAG, "region caps: 0x%x", s_mmu_ctx.mem_regions[i].caps);
    }

    return ESP_OK;
}

/*---------------------------------------------------------------
    Helper APIs for conversion between vaddr and paddr
---------------------------------------------------------------*/
static bool NOINLINE_ATTR IRAM_ATTR s_vaddr_to_paddr(uint32_t vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target)
{
    //We call this for now, but it will be refactored to move out of `spi_flash`
    spi_flash_disable_interrupts_caches_and_other_cpu();
    //On ESP32, core 1 settings should be the same as core 0
    bool is_mapped = mmu_hal_vaddr_to_paddr(0, vaddr, out_paddr, out_target);
    spi_flash_enable_interrupts_caches_and_other_cpu();

    return is_mapped;
}

esp_err_t esp_mmu_vaddr_to_paddr(void *vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target)
{
    //`out_target` is dereferenced below, so check it here together with the other pointers
    ESP_RETURN_ON_FALSE(vaddr && out_paddr && out_target, ESP_ERR_INVALID_ARG, TAG, "null pointer");
    ESP_RETURN_ON_FALSE(mmu_hal_check_valid_ext_vaddr_region(0, (uint32_t)vaddr, 1, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION), ESP_ERR_INVALID_ARG, TAG, "not a valid external virtual address");

    esp_paddr_t paddr = 0;
    mmu_target_t target = 0;

    bool is_mapped = s_vaddr_to_paddr((uint32_t)vaddr, &paddr, &target);
    ESP_RETURN_ON_FALSE(is_mapped, ESP_ERR_NOT_FOUND, TAG, "vaddr isn't mapped");

    *out_paddr = paddr;
    *out_target = target;

    return ESP_OK;
}
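
/**
 * Usage sketch (illustrative): convert a pointer returned by esp_mmu_map() back to its
 * physical address and target.
 *
 * @code{c}
 *     esp_paddr_t paddr = 0;
 *     mmu_target_t target = MMU_TARGET_FLASH0;
 *     ESP_ERROR_CHECK(esp_mmu_vaddr_to_paddr(ptr, &paddr, &target));
 *     //`paddr` is the physical address backing `ptr`; `target` tells whether it lives on flash or PSRAM
 * @endcode
 */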

static bool NOINLINE_ATTR IRAM_ATTR s_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vaddr_t type, uint32_t *out_vaddr)
{
    //We call this for now, but it will be refactored to move out of `spi_flash`
    spi_flash_disable_interrupts_caches_and_other_cpu();
    //On ESP32, core 1 settings should be the same as core 0
    bool found = mmu_hal_paddr_to_vaddr(0, paddr, target, type, out_vaddr);
    spi_flash_enable_interrupts_caches_and_other_cpu();

    return found;
}

esp_err_t esp_mmu_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vaddr_t type, void **out_vaddr)
{
    ESP_RETURN_ON_FALSE(out_vaddr, ESP_ERR_INVALID_ARG, TAG, "null pointer");

    uint32_t vaddr = 0;
    bool found = false;
    found = s_paddr_to_vaddr(paddr, target, type, &vaddr);
    ESP_RETURN_ON_FALSE(found, ESP_ERR_NOT_FOUND, TAG, "paddr isn't mapped");

    *out_vaddr = (void *)vaddr;

    return ESP_OK;
}
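
/**
 * Usage sketch (illustrative; the physical offset is hypothetical): find the data virtual address
 * a mapped physical address is currently visible at.
 *
 * @code{c}
 *     void *vaddr = NULL;
 *     esp_err_t err = esp_mmu_paddr_to_vaddr(0x200000, MMU_TARGET_FLASH0, MMU_VADDR_DATA, &vaddr);
 *     if (err == ESP_ERR_NOT_FOUND) {
 *         //this paddr isn't currently mapped to any data vaddr
 *     }
 * @endcode
 */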

#if ENABLE_PADDR_CHECK
/*---------------------------------------------------------------
    Helper functions to check block
---------------------------------------------------------------*/
/**
 * Check if a new block is enclosed by another, e.g.
 *
 * This is enclosed:
 *
 *         new_block_start           new_block_end
 *               |-------- New Block --------|
 *       |--------------- Block ----------------|
 *  block_start                             block_end
 *
 * @note Note the difference from `s_is_overlapped()` below
 *
 * @param block_start     An original block start
 * @param block_end       An original block end
 * @param new_block_start New block start
 * @param new_block_size  New block size
 *
 * @return True: new block is enclosed; False: new block is not enclosed
 */
static bool s_is_enclosed(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size)
{
    bool is_enclosed = false;
    uint32_t new_block_end = new_block_start + new_block_size;

    if ((new_block_start >= block_start) && (new_block_end <= block_end)) {
        is_enclosed = true;
    } else {
        is_enclosed = false;
    }

    return is_enclosed;
}

/**
 * Check if a new block is overlapped with another, e.g.
 *
 * This is overlapped:
 *
 *  new_block_start            new_block_end
 *        |---------- New Block ----------|
 *                   |--------------- Block ---------------|
 *              block_start                            block_end
 *
 * @note Note the difference from `s_is_enclosed()` above
 *
 * @param block_start     An original block start
 * @param block_end       An original block end
 * @param new_block_start New block start
 * @param new_block_size  New block size
 *
 * @return True: new block is overlapped; False: new block is not overlapped
 */
static bool s_is_overlapped(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size)
{
    bool is_overlapped = false;
    uint32_t new_block_end = new_block_start + new_block_size;

    if (((new_block_start < block_start) && (new_block_end > block_start)) ||
        ((new_block_start < block_end) && (new_block_end > block_end))) {
        is_overlapped = true;
    } else {
        is_overlapped = false;
    }

    return is_overlapped;
}
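
/**
 * Numeric example (illustrative) of the two checks above, with Block = [0x10000, 0x30000):
 *
 *   s_is_enclosed(0x10000, 0x30000, 0x18000, 0x8000)    -> true  (new block [0x18000, 0x20000) lies fully inside)
 *   s_is_overlapped(0x10000, 0x30000, 0x08000, 0x10000) -> true  (new block [0x08000, 0x18000) crosses block_start)
 *   s_is_overlapped(0x10000, 0x30000, 0x18000, 0x8000)  -> false (a fully enclosed block is not treated as overlapped)
 */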
#endif  //#if ENABLE_PADDR_CHECK