mmu.c

/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-02-20     bigmagic     first version
 */
#include <mmu.h>
#include <stddef.h>
#include <stdint.h> /* for uint64_t, instead of re-declaring it locally */

#define TTBR_CNP 1

/*
 * Static pool of translation tables (20 pages of 4KB). The first page is the
 * level-0 table that TTBR0_EL1 points at; the remaining pages are handed out
 * to lower-level tables by get_free_page() and create_table() below.
 */
static unsigned long main_tbl[512 * 20] __attribute__((aligned(4096)));

#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)

#define PMD_TYPE_SECT  (1 << 0)
#define PMD_TYPE_TABLE (3 << 0)
#define PTE_TYPE_PAGE  (3 << 0)

#define BITS_PER_VA 39

/* A granule size of 4KB is being used */
#define GRANULE_SIZE_SHIFT 12
#define GRANULE_SIZE       (1 << GRANULE_SIZE_SHIFT)
#define XLAT_ADDR_MASK     ((1UL << BITS_PER_VA) - GRANULE_SIZE)

#define PMD_TYPE_MASK (3 << 0)

/* Index of the next free 4KB page in main_tbl, used by create_table() */
int free_idx = 1;
void mmu_memset(char *dst, char v, size_t len)
{
    while (len--)
    {
        *dst++ = v;
    }
}
static unsigned long __page_off = 0;

/*
 * Bump allocator for lower-level tables: returns the next free 4KB page of
 * main_tbl. __page_off counts unsigned long entries, so +512 advances one
 * page. Note there is no bounds check against the 20-page pool.
 */
static unsigned long get_free_page(void)
{
    __page_off += 512;
    return (unsigned long)(main_tbl + __page_off);
}
void mmu_init(void)
{
    unsigned long val64;
    unsigned long val32;

    /* MAIR_EL1: Attr0 = 0x6e (Normal memory), Attr1 = 0x7f, Attr2 = 0x00 (Device-nGnRnE) */
    val64 = 0x007f6eUL;
    __asm__ volatile("msr MAIR_EL1, %0\n dsb sy\n"::"r"(val64));
    __asm__ volatile("mrs %0, MAIR_EL1\n dsb sy\n":"=r"(val64));

    /* TCR_EL1 */
    val32 = (16UL << 0)   /* T0SZ = 16: 48-bit virtual address space */
          | (0x0UL << 6)
          | (0x0UL << 7)
          | (0x3UL << 8)   /* IRGN0: inner write-back cacheable */
          | (0x3UL << 10)  /* ORGN0: outer write-back cacheable */
          | (0x2UL << 12)  /* SH0: Outer Shareable */
          | (0x0UL << 14)  /* TG0: 4KB granule */
          | (0x0UL << 16)
          | (0x0UL << 22)
          | (0x1UL << 23)  /* EPD1: disable TTBR1_EL1 walks */
          | (0x2UL << 30)  /* TG1: 4KB granule */
          | (0x1UL << 32)  /* IPS: 36-bit physical addresses */
          | (0x0UL << 35)
          | (0x0UL << 36)
          | (0x0UL << 37)
          | (0x0UL << 38);
    __asm__ volatile("msr TCR_EL1, %0\n"::"r"(val32));
    __asm__ volatile("mrs %0, TCR_EL1\n":"=r"(val32));

    __asm__ volatile("msr TTBR0_EL1, %0\n dsb sy\n"::"r"(main_tbl));
    __asm__ volatile("mrs %0, TTBR0_EL1\n dsb sy\n":"=r"(val64));

    /* Clear the level-0 table */
    mmu_memset((char *)main_tbl, 0, 4096);
}
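
/*
 * Illustrative only (these macros are not part of the original file or of
 * mmu.h): one way to compose the 'attr' argument for the mapping APIs below
 * so that it is consistent with the MAIR_EL1 value programmed above. The
 * field positions come from the ARMv8-A block/page descriptor format:
 * AttrIndx is bits [4:2], SH is bits [9:8], AF is bit [10]; all of them
 * survive the 0xfff0000000000ffcUL attribute mask applied by the mappers.
 */
#define EXAMPLE_ATTR_NORMAL ((0UL << 2) | (3UL << 8) | (1UL << 10)) /* AttrIndx=0 (Normal), Inner Shareable, AF=1 */
#define EXAMPLE_ATTR_DEVICE ((2UL << 2) | (2UL << 8) | (1UL << 10)) /* AttrIndx=2 (Device-nGnRnE), Outer Shareable, AF=1 */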
void mmu_enable(void)
{
    unsigned long val64;
    unsigned long val32;

    __asm__ volatile("mrs %0, SCTLR_EL1\n":"=r"(val64));
    val64 &= ~0x1000; /* clear I bit: disable the instruction cache */
    __asm__ volatile("dmb sy\n msr SCTLR_EL1, %0\n isb sy\n"::"r"(val64));

    /* Invalidate the instruction cache and the TLBs before turning the MMU on */
    __asm__ volatile("ic ialluis\n dsb sy\n isb sy\n");
    __asm__ volatile("tlbi vmalle1\n dsb sy\n isb sy\n");

    /* SCTLR_EL1: turn on the MMU */
    __asm__ volatile("mrs %0, SCTLR_EL1\n":"=r"(val32));
    val32 |= 0x1005; /* set M (bit 0), C (bit 2), I (bit 12): MMU, data cache, instruction cache */
    __asm__ volatile("dmb sy\n msr SCTLR_EL1, %0\nisb sy\n"::"r"(val32));
}
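
/*
 * Typical bring-up order (a sketch using the illustrative macros above; the
 * identity-mapped range is an assumption, not taken from this file):
 *
 *     mmu_init();                                       // program MAIR/TCR/TTBR0, clear level-0 table
 *     armv8_map_2M(0x0, 0x0, 16, EXAMPLE_ATTR_NORMAL);  // identity-map the first 32MB
 *     mmu_enable();                                     // then turn on caches and MMU
 */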
static int map_single_page_2M(unsigned long *lv0_tbl, unsigned long va, unsigned long pa, unsigned long attr)
{
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = 39;

    if (va & (0x200000UL - 1))
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & (0x200000UL - 1))
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }

    /* Walk levels 0 and 1, allocating intermediate tables as needed */
    for (level = 0; level < 2; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if ((cur_lv_tbl[off] & 1) == 0)
        {
            page = get_free_page();
            if (!page)
            {
                return MMU_MAP_ERROR_NOPAGE;
            }
            mmu_memset((char *)page, 0, 4096);
            cur_lv_tbl[off] = page | 0x3UL; /* next-level table descriptor */
        }
        page = cur_lv_tbl[off];
        if (!(page & 0x2))
        {
            /* The existing entry is a block descriptor: conflict */
            return MMU_MAP_ERROR_CONFLICT;
        }
        cur_lv_tbl = (unsigned long *)(page & 0x0000fffffffff000UL);
        level_shift -= 9;
    }

    /* Write the 2MB block descriptor at level 2 */
    attr &= 0xfff0000000000ffcUL; /* keep only the attribute bits */
    pa |= (attr | 0x1UL);         /* descriptor type = block */
    off = (va >> 21);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    return 0;
}
int armv8_map_2M(unsigned long va, unsigned long pa, int count, unsigned long attr)
{
    int i;
    int ret;

    if (va & (0x200000 - 1))
    {
        return -1;
    }
    if (pa & (0x200000 - 1))
    {
        return -1;
    }
    for (i = 0; i < count; i++)
    {
        ret = map_single_page_2M((unsigned long *)main_tbl, va, pa, attr);
        va += 0x200000;
        pa += 0x200000;
        if (ret != 0)
        {
            return ret;
        }
    }
    return 0;
}
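
/*
 * Usage sketch (the peripheral base address is an assumption for a
 * BCM2837-class board, not a value from this file): identity-map sixteen
 * 2MB blocks of MMIO space as Device memory.
 *
 *     if (armv8_map_2M(0x3f000000UL, 0x3f000000UL, 16, EXAMPLE_ATTR_DEVICE) != 0)
 *     {
 *         // alignment error, table exhaustion, or a block/table conflict
 *     }
 */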
static void set_table(uint64_t *pt, uint64_t *table_addr)
{
    uint64_t val;

    val = (0x3UL | (uint64_t)table_addr); /* next-level table descriptor */
    *pt = val;
}

void mmu_memset2(unsigned char *dst, char v, int len)
{
    while (len--)
    {
        *dst++ = v;
    }
}

static uint64_t *create_table(void)
{
    uint64_t *new_table = (uint64_t *)((unsigned char *)&main_tbl[0] + free_idx * GRANULE_SIZE);

    /* Mark all entries as invalid */
    mmu_memset2((unsigned char *)new_table, 0, GRANULE_SIZE);
    free_idx++;
    return new_table;
}
static int pte_type(uint64_t *pte)
{
    return *pte & PMD_TYPE_MASK;
}

static int level2shift(int level)
{
    /* The page offset is 12 bits wide; every level translates 9 bits */
    return (12 + 9 * (3 - level));
}
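
/*
 * Worked values for the 4KB granule: level 0 -> shift 39 (512GB per entry),
 * level 1 -> shift 30 (1GB), level 2 -> shift 21 (2MB), level 3 -> shift 12 (4KB).
 */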
static uint64_t *get_level_table(uint64_t *pte)
{
    uint64_t *table = (uint64_t *)(*pte & XLAT_ADDR_MASK);

    if (pte_type(pte) != PMD_TYPE_TABLE)
    {
        table = create_table();
        set_table(pte, table);
    }
    return table;
}
static void map_region(uint64_t virt, uint64_t phys, uint64_t size, uint64_t attr)
{
    uint64_t block_size = 0;
    uint64_t block_shift = 0;
    uint64_t *pte;
    uint64_t idx = 0;
    uint64_t addr = 0;
    uint64_t *table = 0;
    int level = 0;

    addr = virt;
    while (size)
    {
        table = (uint64_t *)&main_tbl[0];
        for (level = 0; level < 4; level++)
        {
            block_shift = level2shift(level);
            idx = addr >> block_shift;
            idx = idx % 512;
            block_size = (uint64_t)(1L << block_shift);
            pte = table + idx;
            if (size >= block_size && IS_ALIGNED(addr, block_size))
            {
                /* The remaining region covers a whole block at this level:
                 * write a block descriptor, or a page descriptor at level 3 */
                attr &= 0xfff0000000000ffcUL;
                if (level != 3)
                {
                    *pte = phys | (attr | 0x1UL); /* block */
                }
                else
                {
                    *pte = phys | (attr | 0x3UL); /* page */
                }
                addr += block_size;
                phys += block_size;
                size -= block_size;
                break;
            }
            /* Otherwise descend, creating the next-level table if needed */
            table = get_level_table(pte);
        }
    }
}
void armv8_map(unsigned long va, unsigned long pa, unsigned long size, unsigned long attr)
{
    map_region(va, pa, size, attr);
}
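
/*
 * Usage sketch (addresses are assumptions): armv8_map() picks the largest
 * block that still fits at each step, so mapping 2MB + 4KB lays down one
 * level-2 block plus one level-3 page:
 *
 *     armv8_map(0x40000000UL, 0x40000000UL, 0x201000UL, EXAMPLE_ATTR_NORMAL);
 *
 * Caveat visible in the code above: armv8_map() allocates tables through
 * create_table()/free_idx while armv8_map_2M() uses get_free_page()/__page_off,
 * and both counters start at the same 4KB page of main_tbl, so mixing the two
 * APIs on one table pool will overwrite live tables.
 */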