mm_page.c

/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-11-01     Jesven       The first version
 * 2022-12-13     WangXiaoyao  Hot-pluggable, extensible
 *                             page management algorithm
 * 2023-02-20     WangXiaoyao  Multi-list page-management
 * 2023-11-28     Shell        Bugs fix for page_install on shadow region
 * 2024-06-18     Shell        Added affinity page management for page coloring.
 */
#include <rtthread.h>

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "mm_fault.h"
#include "mm_private.h"
#include "mm_aspace.h"
#include "mm_flag.h"
#include "mm_page.h"

#include <mmu.h>

#define DBG_TAG "mm.page"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>

RT_STATIC_ASSERT(order_huge_pg, RT_PAGE_MAX_ORDER > ARCH_PAGE_SHIFT - 2);
RT_STATIC_ASSERT(size_width, sizeof(rt_size_t) == sizeof(void *));

#ifdef RT_USING_SMART
#include "lwp_arch_comm.h"
#endif /* RT_USING_SMART */

static rt_size_t init_mpr_align_start;
static rt_size_t init_mpr_align_end;
static void *init_mpr_cont_start;

static struct rt_varea mpr_varea;

typedef union
{
    struct rt_page *page_list;
    rt_ubase_t aff_page_map;
} pgls_agr_t;

#define PGLS_IS_AFF_MAP(pgls) (!!((pgls).aff_page_map & 0x1))
#define PGLS_FROM_AFF_MAP(pgls, aff_map) \
    ((pgls).aff_page_map = (-(rt_ubase_t)(aff_map)) | 0x1)
#define PGLS_GET_AFF_MAP(pgls) \
    ((struct rt_page **)-((pgls).aff_page_map & ~0x1))
#define PGLS_GET(pgls) \
    (PGLS_IS_AFF_MAP(pgls) ? PGLS_GET_AFF_MAP(pgls) : (pgls).page_list)
#define PAGE_TO_AFFID(page) (RT_PAGE_PICK_AFFID(page_to_paddr(page)))
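
/*
 * Note on the encoding above: each per-order entry is either a plain free
 * list head (page_list) or, for the orders that are split per affinity id,
 * a pointer to an array of list heads (aff_page_map). A valid struct rt_page
 * pointer is at least 2-byte aligned, so bit 0 is free to act as the tag:
 * PGLS_FROM_AFF_MAP() stores the arithmetic negation of the array pointer
 * with bit 0 set, and PGLS_GET_AFF_MAP() clears the tag and negates again to
 * recover the original pointer.
 */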
/* affinity id */
#define AFFID_BLK_BITS \
    ((sizeof(int) * 8 - 1) - __builtin_clz(RT_PAGE_AFFINITY_BLOCK_SIZE) - ARCH_PAGE_SHIFT)
#define AFFID_NUMOF_ID_IN_SET(order) \
    ((RT_PAGE_AFFINITY_BLOCK_SIZE / ARCH_PAGE_SIZE) / (1ul << (order)))
#define AFFID_BITS_MASK(order) \
    (((1 << AFFID_BLK_BITS) - 1) - ((1 << (order)) - 1))
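
/*
 * Example (illustrative values, not mandated by this file): with
 * RT_PAGE_AFFINITY_BLOCK_SIZE = 64 KiB and 4 KiB pages, AFFID_BLK_BITS is 4,
 * so an affinity block holds 16 pages and affinity ids run from 0 to 15.
 * A free block of order `order` covers 2^order consecutive ids, hence
 * AFFID_NUMOF_ID_IN_SET(order) distinct list heads are needed (16, 8, 4, 2
 * for orders 0..3), and AFFID_BITS_MASK(order) keeps only the id bits that
 * still distinguish blocks of that order.
 */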
static pgls_agr_t page_list_low[RT_PAGE_MAX_ORDER];
static rt_page_t aff_pglist_low[AFFID_NUMOF_ID_IN_SET(0) * 2 - 2];
static pgls_agr_t page_list_high[RT_PAGE_MAX_ORDER];
static rt_page_t aff_pglist_high[AFFID_NUMOF_ID_IN_SET(0) * 2 - 2];
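
/*
 * Sizing note: the backing arrays above pack the per-affinity list heads of
 * every order below AFFID_BLK_BITS back to back. With N =
 * AFFID_NUMOF_ID_IN_SET(0) heads at order 0, the geometric series
 * N + N/2 + ... + 2 sums to 2 * N - 2 entries, which is exactly the array
 * length used here.
 */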
/* protect buddy list and page records */
static RT_DEFINE_SPINLOCK(_pgmgr_lock);

#define page_start ((rt_page_t)rt_mpr_start)

static rt_size_t _page_nr, _page_nr_hi;
static rt_size_t _freed_nr, _freed_nr_hi;
static rt_size_t early_offset;

static const char *get_name(rt_varea_t varea)
{
    return "master-page-record";
}

static void hint_free(rt_mm_va_hint_t hint)
{
    hint->flags = MMF_MAP_FIXED;
    hint->limit_start = rt_kernel_space.start;
    hint->limit_range_size = rt_kernel_space.size;
    hint->prefer = rt_mpr_start;
}

static void on_page_fault(struct rt_varea *varea,
                          struct rt_aspace_fault_msg *msg)
{
    char *init_start = (void *)init_mpr_align_start;
    char *init_end = (void *)init_mpr_align_end;
    if ((char *)msg->fault_vaddr < init_end &&
        (char *)msg->fault_vaddr >= init_start)
    {
        rt_size_t offset = (char *)msg->fault_vaddr - init_start;
        msg->response.status = MM_FAULT_STATUS_OK;
        msg->response.vaddr = (char *)init_mpr_cont_start + offset;
        msg->response.size = ARCH_PAGE_SIZE;
    }
    else
    {
        rt_mm_dummy_mapper.on_page_fault(varea, msg);
    }
}

static struct rt_mem_obj mm_page_mapper = {
    .get_name = get_name,
    .on_page_fault = on_page_fault,
    .hint_free = hint_free,
};

#ifdef RT_DEBUGGING_PAGE_LEAK
static volatile int enable;
static rt_page_t _trace_head;
#define TRACE_ALLOC(pg, size) _trace_alloc(pg, __builtin_return_address(0), size)
#define TRACE_FREE(pgaddr, size) _trace_free(pgaddr, __builtin_return_address(0), size)

static long _alloc_cnt;

void rt_page_leak_trace_start()
{
    // TODO multicore safety
    _trace_head = NULL;
    _alloc_cnt = 0;
    enable = 1;
}
MSH_CMD_EXPORT(rt_page_leak_trace_start, start page leak tracer);

static void _collect()
{
    rt_page_t page = _trace_head;
    if (!page)
    {
        rt_kprintf("ok! ALLOC CNT %ld\n", _alloc_cnt);
    }
    else
    {
        while (page)
        {
            rt_page_t next = page->tl_next;
            void *pg_va = rt_page_page2addr(page);
            LOG_W("LEAK: %p, allocator: %p, size bits: %lx", pg_va, page->caller, page->trace_size);
            rt_pages_free(pg_va, page->trace_size);
            page = next;
        }
    }
}

void rt_page_leak_trace_stop()
{
    // TODO multicore safety
    enable = 0;
    _collect();
}
MSH_CMD_EXPORT(rt_page_leak_trace_stop, stop page leak tracer);

static void _trace_alloc(rt_page_t page, void *caller, size_t size_bits)
{
    if (enable)
    {
        page->caller = caller;
        page->trace_size = size_bits;
        page->tl_prev = NULL;
        page->tl_next = NULL;

        _alloc_cnt++;
        if (_trace_head == NULL)
        {
            _trace_head = page;
        }
        else
        {
            _trace_head->tl_prev = page;
            page->tl_next = _trace_head;
            _trace_head = page;
        }
    }
}

void _report(rt_page_t page, size_t size_bits, char *msg)
{
    void *pg_va = rt_page_page2addr(page);
    LOG_W("%s: %p, allocator: %p, size bits: %lx", msg, pg_va, page->caller, page->trace_size);
    rt_kprintf("backtrace\n");
    rt_backtrace();
}

static void _trace_free(rt_page_t page, void *caller, size_t size_bits)
{
    if (enable)
    {
        /* free after free */
        if (page->trace_size == 0xabadcafe)
        {
            _report(page, size_bits, "free after free");
            return;
        }
        else if (page->trace_size != size_bits)
        {
            rt_kprintf("free with size bits %lx\n", size_bits);
            _report(page, size_bits, "incompatible size bits parameter");
            return;
        }

        if (page->ref_cnt == 0)
        {
            _alloc_cnt--;
            if (page->tl_prev)
                page->tl_prev->tl_next = page->tl_next;
            if (page->tl_next)
                page->tl_next->tl_prev = page->tl_prev;
            if (page == _trace_head)
                _trace_head = page->tl_next;
            page->tl_prev = NULL;
            page->tl_next = NULL;
            page->trace_size = 0xabadcafe;
        }
    }
}
#else
#define TRACE_ALLOC(x, y)
#define TRACE_FREE(x, y)
#endif
/* page management */
#ifdef RT_DEBUGGING_PAGE_POISON
#include <bitmap.h>
RT_BITMAP_DECLARE(_init_region_usage_trace, (1 << (1 + ARCH_SECTION_SHIFT - ARCH_PAGE_SHIFT)));
#else
typedef char rt_bitmap_t[0];
#define RT_BITMAP_LEN(__name) (__name)
#endif /* RT_DEBUGGING_PAGE_POISON */

static struct installed_page_reg
{
    rt_region_t region_area;
    struct installed_page_reg *next;
    struct rt_spinlock lock;
#ifdef RT_DEBUGGING_PAGE_POISON
    rt_bitmap_t *usage_trace;
#endif /* RT_DEBUGGING_PAGE_POISON */
} _init_region;

static RT_DEFINE_SPINLOCK(_inst_page_reg_lock);
static struct installed_page_reg *_inst_page_reg_head;

static void _print_region_list(void)
{
    struct installed_page_reg *iter;
    int counts = 0;

    rt_spin_lock(&_inst_page_reg_lock);
    iter = _inst_page_reg_head;
    while (iter != RT_NULL)
    {
        rt_kprintf(" %d: [%p, %p]\n", counts++, iter->region_area.start + PV_OFFSET,
                   iter->region_area.end + PV_OFFSET);
        iter = iter->next;
    }
    rt_spin_unlock(&_inst_page_reg_lock);
}

static struct installed_page_reg *_find_page_region(rt_ubase_t page_va)
{
    struct installed_page_reg *iter;
    struct installed_page_reg *rc = RT_NULL;
    rt_bool_t found = RT_FALSE;

    rt_spin_lock(&_inst_page_reg_lock);
    for (iter = _inst_page_reg_head; iter; iter = iter->next)
    {
        if (page_va >= iter->region_area.start &&
            page_va < iter->region_area.end)
        {
            found = RT_TRUE;
            break;
        }
    }
    rt_spin_unlock(&_inst_page_reg_lock);

    if (found)
    {
        rc = iter;
    }
    return rc;
}

rt_bool_t rt_page_is_member(rt_base_t page_pa)
{
    return _find_page_region(page_pa - PV_OFFSET) != RT_NULL;
}
static rt_bool_t _pages_are_member(rt_ubase_t page_va, size_t size_bits)
{
    rt_bool_t rc = RT_TRUE;
    rt_ubase_t iter_frame = page_va;
    /* span of the allocation in bytes, so every frame in it gets checked */
    size_t frame_end = page_va + (ARCH_PAGE_SIZE << size_bits);

    while (iter_frame < frame_end)
    {
        size_t overlap_size;
        struct installed_page_reg *page_reg = _find_page_region(iter_frame);
        if (!page_reg)
        {
            rc = RT_FALSE;
            LOG_E("Allocated invalid page %p", iter_frame);
            break;
        }

        overlap_size = page_reg->region_area.end - iter_frame;
        iter_frame += overlap_size;
    }
    return rc;
}
#ifdef RT_DEBUGGING_PAGE_POISON
static rt_err_t _unpoisoned_pages(char *head, rt_uint32_t size_bits)
{
    rt_err_t error = RT_EOK;
    struct installed_page_reg *page_reg = _find_page_region((rt_ubase_t)head);
    if (page_reg)
    {
        int pages_count = 1 << size_bits;
        long bit_number = ((rt_ubase_t)head - page_reg->region_area.start) / ARCH_PAGE_SIZE;

        /* mark the pages as allocated */
        for (size_t i = 0; i < pages_count; i++, bit_number++)
        {
            rt_spin_lock(&_inst_page_reg_lock);
            if (rt_bitmap_test_bit(page_reg->usage_trace, bit_number))
            {
                error = RT_ERROR;
                rt_kprintf("%s: Pages[%p, %d] are already in use by others!\n", __func__, head, size_bits);
            }
            rt_bitmap_set_bit(page_reg->usage_trace, bit_number);
            rt_spin_unlock(&_inst_page_reg_lock);
        }
    }
    else
    {
        error = RT_EINVAL;
    }

    return -error;
}

static rt_err_t _poisoned_pages(char *head, rt_uint32_t size_bits)
{
    rt_err_t error = RT_EOK;
    struct installed_page_reg *page_reg = _find_page_region((rt_ubase_t)head);
    if (page_reg)
    {
        int pages_count = 1 << size_bits;
        long bit_number = ((rt_ubase_t)head - page_reg->region_area.start) / ARCH_PAGE_SIZE;

        /* mark the pages as free */
        for (size_t i = 0; i < pages_count; i++, bit_number++)
        {
            rt_spin_lock(&_inst_page_reg_lock);
            if (!rt_bitmap_test_bit(page_reg->usage_trace, bit_number))
            {
                error = RT_ERROR;
                rt_kprintf("%s: Pages[%p, %d] were already freed!\n", __func__, head, size_bits);
            }
            rt_bitmap_clear_bit(page_reg->usage_trace, bit_number);
            rt_spin_unlock(&_inst_page_reg_lock);
        }
    }
    else
    {
        error = RT_EINVAL;
    }

    return -error;
}
#endif /* RT_DEBUGGING_PAGE_POISON */
static inline void *page_to_addr(rt_page_t page)
{
    return (void *)(((page - page_start) << ARCH_PAGE_SHIFT) - PV_OFFSET);
}

static inline rt_ubase_t page_to_paddr(rt_page_t page)
{
    return (rt_ubase_t)((page - page_start) << ARCH_PAGE_SHIFT);
}

static inline rt_page_t addr_to_page(rt_page_t pg_start, void *addr)
{
    addr = (char *)addr + PV_OFFSET;
    return &pg_start[((rt_ubase_t)addr >> ARCH_PAGE_SHIFT)];
}

#define CEIL(val, align) (((rt_size_t)(val) + (align)-1) & ~((align)-1))
/**
 * The shadow region is the address range reachable by the buddy algorithm but
 * not usable by the page manager. The shadow mask is used to compute the head
 * of that region from an arbitrary address.
 */
const rt_size_t shadow_mask =
    ((1ul << (RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1)) - 1);
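
/*
 * Example (illustrative values): with RT_PAGE_MAX_ORDER = 11 and 4 KiB pages,
 * the largest buddy block spans 2^10 pages = 4 MiB, so shadow_mask is
 * 4 MiB - 1. Rounding an installed region outward to this boundary yields its
 * shadow region: page records for the shadow frames exist in the MPR so buddy
 * lookups near the region edges stay in bounds, but frames outside any
 * installed region are marked illegal and never handed out.
 */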
#define MPR_SIZE CEIL( \
    ((1ul << (ARCH_VADDR_WIDTH - ARCH_PAGE_SHIFT))) * sizeof(struct rt_page), \
    ARCH_PAGE_SIZE)

#ifdef RT_PAGE_MPR_SIZE_DYNAMIC
/**
 * @brief Get the size of Memory Page Region (MPR)
 *
 * When RT_PAGE_MPR_SIZE_DYNAMIC is enabled, MPR size is calculated at runtime
 * for platforms where virtual address width is not a compile-time constant.
 *
 * @return MPR size in bytes
 */
const rt_size_t rt_mpr_size_dynamic(void)
{
    return MPR_SIZE;
}
#else
const rt_size_t rt_mpr_size = MPR_SIZE;
#endif

void *rt_mpr_start;

rt_weak int rt_hw_clz(unsigned long n)
{
    return __builtin_clzl(n);
}

rt_weak int rt_hw_ctz(unsigned long n)
{
    return __builtin_ctzl(n);
}

rt_size_t rt_page_bits(rt_size_t size)
{
    int bit = sizeof(rt_size_t) * 8 - rt_hw_clz(size) - 1;

    if ((size ^ (1UL << bit)) != 0)
    {
        bit++;
    }
    bit -= ARCH_PAGE_SHIFT;
    if (bit < 0)
    {
        bit = 0;
    }
    return bit;
}
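
/*
 * rt_page_bits() maps a byte count to the smallest buddy order that covers
 * it: ceil(log2(size)) - ARCH_PAGE_SHIFT, clamped at 0. For example
 * (assuming 4 KiB pages), a 12 KiB request yields order 2, i.e. a 16 KiB
 * (4-page) block.
 */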
struct rt_page *rt_page_addr2page(void *addr)
{
    return addr_to_page(page_start, addr);
}

void *rt_page_page2addr(struct rt_page *p)
{
    return page_to_addr(p);
}

static inline struct rt_page *_buddy_get(struct rt_page *p,
                                         rt_uint32_t size_bits)
{
    rt_size_t addr;

    RT_ASSERT(size_bits < RT_PAGE_MAX_ORDER - 1);
    addr = (rt_size_t)rt_page_page2addr(p);
    addr ^= (1UL << (size_bits + ARCH_PAGE_SHIFT));
    return rt_page_addr2page((void *)addr);
}
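
/*
 * A block of order `size_bits` and its buddy differ only in the address bit
 * just above the block size, so flipping that single bit (the XOR above)
 * reaches the buddy's page record in O(1).
 */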
static rt_page_t *_get_pgls_head_by_page(pgls_agr_t *agr_pgls, rt_page_t page,
                                         rt_uint32_t size_bits)
{
    rt_page_t *pgls_head;
    int index;

    if (size_bits < AFFID_BLK_BITS)
    {
        index = PAGE_TO_AFFID(page) >> size_bits;
        RT_ASSERT(index < AFFID_NUMOF_ID_IN_SET(size_bits));
        RT_ASSERT(PGLS_IS_AFF_MAP(agr_pgls[size_bits]));
        pgls_head = &PGLS_GET_AFF_MAP(agr_pgls[size_bits])[index];
    }
    else
    {
        RT_ASSERT(!PGLS_IS_AFF_MAP(agr_pgls[size_bits]));
        pgls_head = &agr_pgls[size_bits].page_list;
    }

    return pgls_head;
}

static rt_page_t *_get_pgls_head(pgls_agr_t *agr_pgls, int affid,
                                 rt_uint32_t size_bits)
{
    rt_page_t *pgls_head;
    int index;

    if (size_bits < AFFID_BLK_BITS)
    {
        index = affid >> size_bits;
        RT_ASSERT(index < AFFID_NUMOF_ID_IN_SET(size_bits));
        RT_ASSERT(PGLS_IS_AFF_MAP(agr_pgls[size_bits]));
        pgls_head = &PGLS_GET_AFF_MAP(agr_pgls[size_bits])[index];
    }
    else
    {
        RT_ASSERT(!PGLS_IS_AFF_MAP(agr_pgls[size_bits]));
        pgls_head = &agr_pgls[size_bits].page_list;
    }

    return pgls_head;
}

static void _page_alloc(struct rt_page *p)
{
    p->size_bits = ARCH_ADDRESS_WIDTH_BITS;
    p->ref_cnt = 1;
}

static void _page_remove(rt_page_t *page_head, struct rt_page *p,
                         rt_uint32_t size_bits)
{
    if (p->pre)
    {
        p->pre->next = p->next;
    }
    else
    {
        *page_head = p->next;
    }

    if (p->next)
    {
        p->next->pre = p->pre;
    }

    RT_ASSERT(p->size_bits == size_bits);
    _page_alloc(p);
}

static void _page_insert(rt_page_t *page_head, struct rt_page *p,
                         rt_uint32_t size_bits)
{
    p->next = *page_head;
    if (p->next)
    {
        p->next->pre = p;
    }
    p->pre = 0;
    *page_head = p;
    p->size_bits = size_bits;
}

static void _pages_ref_inc(struct rt_page *p, rt_uint32_t size_bits)
{
    struct rt_page *page_head;
    int idx;

    /* find page group head */
    idx = p - page_start;
    idx = idx & ~((1UL << size_bits) - 1);

    page_head = page_start + idx;
    page_head = (void *)((char *)page_head + early_offset);
    page_head->ref_cnt++;
}

static int _pages_ref_get(struct rt_page *p, rt_uint32_t size_bits)
{
    struct rt_page *page_head;
    int idx;

    /* find page group head */
    idx = p - page_start;
    idx = idx & ~((1UL << size_bits) - 1);

    page_head = page_start + idx;
    return page_head->ref_cnt;
}

static int _pages_free(pgls_agr_t page_list[], struct rt_page *p,
                       char *frame_va, rt_uint32_t size_bits)
{
    rt_uint32_t level = size_bits;
    struct rt_page *buddy;

    RT_ASSERT(p >= page_start);
    RT_ASSERT((char *)p < (char *)rt_mpr_start + rt_mpr_size);
    RT_ASSERT(rt_kmem_v2p(p));
    RT_ASSERT(p->ref_cnt > 0);
    RT_ASSERT(p->size_bits == ARCH_ADDRESS_WIDTH_BITS);
    RT_ASSERT(size_bits < RT_PAGE_MAX_ORDER);
    RT_UNUSED(_pages_are_member);
    RT_ASSERT(_pages_are_member((rt_ubase_t)frame_va, size_bits));

    p->ref_cnt--;
    if (p->ref_cnt != 0)
    {
        return 0;
    }

#ifdef RT_DEBUGGING_PAGE_POISON
    _poisoned_pages(frame_va, size_bits);
#endif /* RT_DEBUGGING_PAGE_POISON */

    while (level < RT_PAGE_MAX_ORDER - 1)
    {
        buddy = _buddy_get(p, level);
        if (buddy && buddy->size_bits == level)
        {
            _page_remove(_get_pgls_head_by_page(page_list, buddy, level),
                         buddy, level);
            p = (p < buddy) ? p : buddy;
            level++;
        }
        else
        {
            break;
        }
    }

    _page_insert(_get_pgls_head_by_page(page_list, p, level),
                 p, level);
    return 1;
}
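
/*
 * Free path summary: once the reference count drops to zero, the block is
 * repeatedly merged with its buddy as long as the buddy is free and of the
 * same order, then the (possibly enlarged) block is inserted under the free
 * list head that matches its final order and affinity id.
 */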
static struct rt_page *__pages_alloc(
    pgls_agr_t agr_pgls[], rt_uint32_t size_bits, int affid,
    void (*page_remove)(rt_page_t *page_head, struct rt_page *p,
                        rt_uint32_t size_bits),
    void (*page_insert)(rt_page_t *page_head, struct rt_page *p,
                        rt_uint32_t size_bits),
    void (*page_alloc)(rt_page_t page))
{
    rt_page_t *pgls_head = _get_pgls_head(agr_pgls, affid, size_bits);
    rt_page_t p = *pgls_head;

    if (p)
    {
        page_remove(pgls_head, p, size_bits);
    }
    else
    {
        rt_uint32_t level;
        rt_page_t head;

        /* fallback for allocation */
        for (level = size_bits + 1; level < RT_PAGE_MAX_ORDER; level++)
        {
            pgls_head = _get_pgls_head(agr_pgls, affid, level);
            p = *pgls_head;
            if (p)
            {
                break;
            }
        }
        if (level == RT_PAGE_MAX_ORDER)
        {
            return 0;
        }

        page_remove(pgls_head, p, level);

        /* pick the page that satisfies the affinity tag */
        head = p;
        p = head + (affid - (affid & AFFID_BITS_MASK(level)));
        page_alloc(p);

        /* release the pages the caller doesn't need */
        while (level > size_bits)
        {
            long lower_bits = level - 1;
            rt_page_t middle = _buddy_get(head, lower_bits);
            if (p >= middle)
            {
                page_insert(
                    _get_pgls_head_by_page(agr_pgls, head, lower_bits),
                    head, lower_bits);
                head = middle;
            }
            else
            {
                page_insert(
                    _get_pgls_head_by_page(agr_pgls, middle, lower_bits),
                    middle, lower_bits);
            }
            level = lower_bits;
        }
    }

    return p;
}
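
/*
 * Allocation summary: if no free block of the requested order carries the
 * requested affinity id, a larger block is taken from the fallback orders,
 * the sub-block whose pages match the affinity id is claimed, and the unused
 * halves are split off level by level and returned to the free lists under
 * their own affinity heads (the classic buddy split).
 */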
static struct rt_page *_pages_alloc(pgls_agr_t page_list[],
                                    rt_uint32_t size_bits, int affid)
{
    return __pages_alloc(page_list, size_bits, affid, _page_remove,
                         _page_insert, _page_alloc);
}

static void _early_page_remove(rt_page_t *pgls_head, rt_page_t page,
                               rt_uint32_t size_bits)
{
    rt_page_t page_cont = (rt_page_t)((char *)page + early_offset);
    if (page_cont->pre)
    {
        rt_page_t pre_cont = (rt_page_t)((char *)page_cont->pre + early_offset);
        pre_cont->next = page_cont->next;
    }
    else
    {
        *pgls_head = page_cont->next;
    }

    if (page_cont->next)
    {
        rt_page_t next_cont = (rt_page_t)((char *)page_cont->next + early_offset);
        next_cont->pre = page_cont->pre;
    }

    RT_ASSERT(page_cont->size_bits == size_bits);
    page_cont->size_bits = ARCH_ADDRESS_WIDTH_BITS;
    page_cont->ref_cnt = 1;
}

static void _early_page_alloc(rt_page_t page)
{
    rt_page_t page_cont = (rt_page_t)((char *)page + early_offset);
    page_cont->size_bits = ARCH_ADDRESS_WIDTH_BITS;
    page_cont->ref_cnt = 1;
}

static void _early_page_insert(rt_page_t *pgls_head, rt_page_t page,
                               rt_uint32_t size_bits)
{
    RT_ASSERT((void *)page >= rt_mpr_start &&
              ((char *)page - (char *)rt_mpr_start) < rt_mpr_size);
    rt_page_t page_cont = (rt_page_t)((char *)page + early_offset);

    page_cont->next = *pgls_head;
    if (page_cont->next)
    {
        rt_page_t next_cont = (rt_page_t)((char *)page_cont->next + early_offset);
        next_cont->pre = page;
    }
    page_cont->pre = 0;
    *pgls_head = page;
    page_cont->size_bits = size_bits;
}

static struct rt_page *_early_pages_alloc(pgls_agr_t page_list[],
                                          rt_uint32_t size_bits, int affid)
{
    return __pages_alloc(page_list, size_bits, affid, _early_page_remove,
                         _early_page_insert, _early_page_alloc);
}
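
/*
 * The _early_* variants are used before the MPR mapping is fully populated:
 * during rt_page_init() the page records live in a contiguous bootstrap area
 * carved from the installed region, and early_offset is the distance from a
 * record's final MPR address to its bootstrap copy. Every access therefore
 * adds early_offset, while the list links still store the final MPR
 * addresses; rt_page_cleanup() later resets early_offset to zero and
 * switches to the normal handlers.
 */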
static pgls_agr_t *_get_page_list(void *vaddr)
{
    rt_ubase_t pa_int = (rt_ubase_t)vaddr + PV_OFFSET;
    pgls_agr_t *list;
    if (pa_int > UINT32_MAX)
    {
        list = page_list_high;
    }
    else
    {
        list = page_list_low;
    }
    return list;
}

int rt_page_ref_get(void *addr, rt_uint32_t size_bits)
{
    struct rt_page *p;
    rt_base_t level;
    int ref;

    p = rt_page_addr2page(addr);
    level = rt_spin_lock_irqsave(&_pgmgr_lock);
    ref = _pages_ref_get(p, size_bits);
    rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
    return ref;
}

void rt_page_ref_inc(void *addr, rt_uint32_t size_bits)
{
    struct rt_page *p;
    rt_base_t level;

    p = rt_page_addr2page(addr);
    level = rt_spin_lock_irqsave(&_pgmgr_lock);
    _pages_ref_inc(p, size_bits);
    rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
}

static rt_page_t (*pages_alloc_handler)(pgls_agr_t page_list[],
                                        rt_uint32_t size_bits, int affid);

/* if no high pages are configured, skip searching page_list_high */
static size_t _high_page_configured = 0;

static pgls_agr_t *_flag_to_page_list(size_t flags)
{
    pgls_agr_t *page_list;
    if (_high_page_configured && (flags & PAGE_ANY_AVAILABLE))
    {
        page_list = page_list_high;
    }
    else
    {
        page_list = page_list_low;
    }
    return page_list;
}

static volatile rt_ubase_t _last_alloc;

rt_inline void *_do_pages_alloc(rt_uint32_t size_bits, size_t flags, int affid)
{
    void *alloc_buf = RT_NULL;
    struct rt_page *p;
    rt_base_t level;
    pgls_agr_t *page_list = _flag_to_page_list(flags);

    level = rt_spin_lock_irqsave(&_pgmgr_lock);
    p = pages_alloc_handler(page_list, size_bits, affid);
    if (p)
    {
        _freed_nr -= 1 << size_bits;
    }
    rt_spin_unlock_irqrestore(&_pgmgr_lock, level);

    if (!p && page_list != page_list_low)
    {
        /* fall back */
        page_list = page_list_low;

        level = rt_spin_lock_irqsave(&_pgmgr_lock);
        p = pages_alloc_handler(page_list, size_bits, affid);
        if (p)
        {
            _freed_nr -= 1 << size_bits;
            _freed_nr_hi -= 1 << size_bits;
        }
        rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
    }

    if (p)
    {
        alloc_buf = page_to_addr(p);
        _last_alloc = (rt_ubase_t)alloc_buf;

#ifdef RT_DEBUGGING_PAGE_LEAK
        level = rt_spin_lock_irqsave(&_spinlock);
        TRACE_ALLOC(p, size_bits);
        rt_spin_unlock_irqrestore(&_spinlock, level);
#endif

#ifdef RT_DEBUGGING_PAGE_POISON
        _unpoisoned_pages(alloc_buf, size_bits);
#endif /* RT_DEBUGGING_PAGE_POISON */
    }

    return alloc_buf;
}

rt_inline int _get_balanced_id(rt_uint32_t size_bits)
{
    rt_ubase_t last_alloc = (_last_alloc / RT_PAGE_AFFINITY_BLOCK_SIZE);
    return (last_alloc + (1u << size_bits)) & AFFID_BITS_MASK(size_bits);
}
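
/*
 * When the caller does not request a specific affinity id, the starting id
 * is derived from the affinity block of the previous allocation, so
 * successive untagged allocations rotate through the affinity sets instead
 * of piling up on the same cache color.
 */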
static void *_do_pages_alloc_noaff(rt_uint32_t size_bits, size_t flags)
{
    void *rc = RT_NULL;

    if (size_bits < AFFID_BLK_BITS)
    {
        int try_affid = _get_balanced_id(size_bits);
        size_t numof_id = AFFID_NUMOF_ID_IN_SET(size_bits);
        size_t valid_affid_mask = numof_id - 1;

        for (size_t i = 0; i < numof_id; i++, try_affid += 1 << size_bits)
        {
            rc = _do_pages_alloc(size_bits, flags, try_affid & valid_affid_mask);
            if (rc)
            {
                break;
            }
        }
    }
    else
    {
        rc = _do_pages_alloc(size_bits, flags, 0);
    }

    if (!rc)
    {
        RT_ASSERT(0);
    }
    return rc;
}

void *rt_pages_alloc(rt_uint32_t size_bits)
{
    return _do_pages_alloc_noaff(size_bits, 0);
}

void *rt_pages_alloc_ext(rt_uint32_t size_bits, size_t flags)
{
    return _do_pages_alloc_noaff(size_bits, flags);
}
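
/*
 * Usage sketch (illustrative, not part of this module): allocate an order-2
 * block (4 contiguous pages) and release it with the same order.
 * PAGE_ANY_AVAILABLE is assumed to be the allocation flag declared in
 * mm_page.h.
 *
 *     void *buf = rt_pages_alloc_ext(2, PAGE_ANY_AVAILABLE);
 *     if (buf)
 *     {
 *         memset(buf, 0, 4 * ARCH_PAGE_SIZE);
 *         rt_pages_free(buf, 2);
 *     }
 */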
void *rt_pages_alloc_tagged(rt_uint32_t size_bits, long affid, size_t flags)
{
    rt_page_t current;

    current = _do_pages_alloc(size_bits, flags, affid);
    if (current && RT_PAGE_PICK_AFFID(current) != affid)
    {
        RT_ASSERT(0);
    }

    return current;
}

int rt_pages_free(void *addr, rt_uint32_t size_bits)
{
    struct rt_page *p;
    pgls_agr_t *page_list = _get_page_list(addr);
    int real_free = 0;

    p = rt_page_addr2page(addr);
    if (p)
    {
        rt_base_t level;
        level = rt_spin_lock_irqsave(&_pgmgr_lock);
        real_free = _pages_free(page_list, p, addr, size_bits);
        if (real_free)
        {
            _freed_nr += 1 << size_bits;
            if (page_list == page_list_high)
            {
                _freed_nr_hi += 1 << size_bits;
            }
            TRACE_FREE(p, size_bits);
        }
        rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
    }
    return real_free;
}
/* debug command */
int rt_page_list(void) __attribute__((alias("list_page")));

#define PGNR2SIZE(nr) ((nr)*ARCH_PAGE_SIZE / 1024)

static void _dump_page_list(int order, rt_page_t lp, rt_page_t hp,
                            rt_size_t *pfree)
{
    rt_size_t free = 0;

    rt_kprintf("level %d ", order);

    while (lp)
    {
        free += (1UL << order);
        rt_kprintf("[L:0x%08p]", rt_page_page2addr(lp));
        lp = lp->next;
    }
    while (hp)
    {
        free += (1UL << order);
        rt_kprintf("[H:0x%08p]", rt_page_page2addr(hp));
        hp = hp->next;
    }
    rt_kprintf("\n");
    *pfree += free;
}

int list_page(void)
{
    int i;
    rt_size_t free = 0;
    rt_size_t installed = _page_nr;

    rt_base_t level;
    level = rt_spin_lock_irqsave(&_pgmgr_lock);

    /* dump affinity map area */
    for (i = 0; i < AFFID_BLK_BITS; i++)
    {
        rt_page_t *iter_lo = PGLS_GET_AFF_MAP(page_list_low[i]);
        rt_page_t *iter_hi = PGLS_GET_AFF_MAP(page_list_high[i]);
        rt_size_t list_len = AFFID_NUMOF_ID_IN_SET(i);
        for (size_t j = 0; j < list_len; j++)
        {
            _dump_page_list(i, iter_lo[j], iter_hi[j], &free);
        }
    }

    /* dump normal page list */
    for (; i < RT_PAGE_MAX_ORDER; i++)
    {
        rt_page_t lp = page_list_low[i].page_list;
        rt_page_t hp = page_list_high[i].page_list;
        _dump_page_list(i, lp, hp, &free);
    }

    rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
    rt_kprintf("-------------------------------\n");
    rt_kprintf("Page Summary:\n => free/installed:\n 0x%lx/0x%lx (%ld/%ld KB)\n",
               free, installed, PGNR2SIZE(free), PGNR2SIZE(installed));
    rt_kprintf(" => Installed Pages Region:\n");
    _print_region_list();
    rt_kprintf("-------------------------------\n");
    return 0;
}
MSH_CMD_EXPORT(list_page, show page info);

void rt_page_get_info(rt_size_t *total_nr, rt_size_t *free_nr)
{
    *total_nr = _page_nr;
    *free_nr = _freed_nr;
}

void rt_page_high_get_info(rt_size_t *total_nr, rt_size_t *free_nr)
{
    *total_nr = _page_nr_hi;
    *free_nr = _freed_nr_hi;
}
static void _invalid_uninstalled_shadow(rt_page_t start, rt_page_t end)
{
    for (rt_page_t iter = start; iter < end; iter++)
    {
        rt_base_t frame = (rt_base_t)rt_page_page2addr(iter);
        struct installed_page_reg *page_reg = _find_page_region(frame);
        if (page_reg)
        {
            continue;
        }
        iter->size_bits = ARCH_ADDRESS_WIDTH_BITS;
    }
}

static void _install_page(rt_page_t mpr_head, rt_region_t region,
                          void (*insert)(rt_page_t *ppg, rt_page_t page, rt_uint32_t size_bits))
{
    pgls_agr_t *page_list;
    rt_page_t *page_head;
    rt_region_t shadow;
    const rt_base_t pvoffset = PV_OFFSET;

    _page_nr += ((region.end - region.start) >> ARCH_PAGE_SHIFT);
    _freed_nr += ((region.end - region.start) >> ARCH_PAGE_SHIFT);

    shadow.start = region.start & ~shadow_mask;
    shadow.end = CEIL(region.end, shadow_mask + 1);

    if (shadow.end + pvoffset > UINT32_MAX)
        _high_page_configured = 1;

    rt_page_t shad_head = addr_to_page(mpr_head, (void *)shadow.start);
    rt_page_t shad_tail = addr_to_page(mpr_head, (void *)shadow.end);
    rt_page_t head = addr_to_page(mpr_head, (void *)region.start);
    rt_page_t tail = addr_to_page(mpr_head, (void *)region.end);

    /* mark shadow page records that don't belong to any other region as illegal */
    _invalid_uninstalled_shadow(shad_head, head);
    _invalid_uninstalled_shadow(tail, shad_tail);

    /* insert reserved pages to list */
    const int max_order = RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1;
    while (region.start != region.end)
    {
        struct rt_page *p;
        int align_bits;
        int size_bits;
        int page_order;

        size_bits =
            ARCH_ADDRESS_WIDTH_BITS - 1 - rt_hw_clz(region.end - region.start);
        align_bits = rt_hw_ctz(region.start);
        if (align_bits < size_bits)
        {
            size_bits = align_bits;
        }
        if (size_bits > max_order)
        {
            size_bits = max_order;
        }

        p = addr_to_page(mpr_head, (void *)region.start);
        p->size_bits = ARCH_ADDRESS_WIDTH_BITS;
        p->ref_cnt = 0;

        /* insert to list */
        page_list = _get_page_list((void *)region.start);
        if (page_list == page_list_high)
        {
            _page_nr_hi += 1 << (size_bits - ARCH_PAGE_SHIFT);
            _freed_nr_hi += 1 << (size_bits - ARCH_PAGE_SHIFT);
        }

        page_order = size_bits - ARCH_PAGE_SHIFT;
        page_head = _get_pgls_head_by_page(page_list, p, page_order);
        insert(page_head, (rt_page_t)((char *)p - early_offset), page_order);
        region.start += (1UL << size_bits);
    }
}
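
/*
 * Installation walks the region greedily: each iteration takes the largest
 * power-of-two block that is aligned to the current start address, fits in
 * the remaining range, and does not exceed the maximum buddy order, then
 * inserts it into the matching free list and advances past it.
 */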
static void *_aligned_to_affinity(rt_ubase_t head_page_pa, void *mapped_to)
{
#define AFFBLK_MASK (RT_PAGE_AFFINITY_BLOCK_SIZE - 1)
    rt_ubase_t head_page_pg_aligned;
    rt_ubase_t aligned_affblk_tag = (long)mapped_to & AFFBLK_MASK;

    head_page_pg_aligned =
        ((long)head_page_pa & ~AFFBLK_MASK) | aligned_affblk_tag;
    if (head_page_pg_aligned < head_page_pa)
    {
        /* find the page forward */
        head_page_pg_aligned += RT_PAGE_AFFINITY_BLOCK_SIZE;
    }

    return (void *)head_page_pg_aligned;
}
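
/*
 * This returns the lowest physical address at or above head_page_pa whose
 * offset within an affinity block equals the offset of mapped_to, so the
 * bootstrap MPR backing starts on frames whose affinity "color" matches the
 * virtual addresses they will be mapped at.
 */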
void rt_page_init(rt_region_t reg)
{
    int i;
    rt_region_t shadow;

    /* setup install page status */
    rt_spin_lock_init(&_init_region.lock);
    _init_region.region_area = reg;
    _init_region.next = RT_NULL;
#ifdef RT_DEBUGGING_PAGE_POISON
    _init_region.usage_trace = _init_region_usage_trace;
#endif /* RT_DEBUGGING_PAGE_POISON */
    _inst_page_reg_head = &_init_region;

    /* adjust install region. inclusive start, exclusive end */
    reg.start += ARCH_PAGE_MASK;
    reg.start &= ~ARCH_PAGE_MASK;
    reg.end &= ~ARCH_PAGE_MASK;
    if (reg.end <= reg.start)
    {
        LOG_E("region end(%p) must be greater than start(%p)", reg.end, reg.start);
        RT_ASSERT(0);
    }

    shadow.start = reg.start & ~shadow_mask;
    shadow.end = CEIL(reg.end, shadow_mask + 1);
    LOG_D("[Init page] start: 0x%lx, end: 0x%lx, total: 0x%lx", reg.start,
          reg.end, ((reg.end - reg.start) >> ARCH_PAGE_SHIFT));

    int err;

    /* init free list */
    rt_page_t *aff_pgls_iter_lo = aff_pglist_low;
    rt_page_t *aff_pgls_iter_hi = aff_pglist_high;

    for (i = 0; i < AFFID_BLK_BITS; i++)
    {
        long stride = AFFID_NUMOF_ID_IN_SET(i);
        PGLS_FROM_AFF_MAP(page_list_low[i], aff_pgls_iter_lo);
        PGLS_FROM_AFF_MAP(page_list_high[i], aff_pgls_iter_hi);
        aff_pgls_iter_lo += stride;
        aff_pgls_iter_hi += stride;
    }

    for (; i < RT_PAGE_MAX_ORDER; i++)
    {
        page_list_low[i].page_list = 0;
        page_list_high[i].page_list = 0;
    }

    /* map MPR area */
    err = rt_aspace_map_static(&rt_kernel_space, &mpr_varea, &rt_mpr_start,
                               rt_mpr_size, MMU_MAP_K_RWCB, MMF_MAP_FIXED,
                               &mm_page_mapper, 0);
    if (err != RT_EOK)
    {
        LOG_E("MPR map failed with size %lx at %p", rt_mpr_size, rt_mpr_start);
        RT_ASSERT(0);
    }

    /* calculate footprint */
    init_mpr_align_start =
        (rt_size_t)addr_to_page(page_start, (void *)shadow.start) &
        ~ARCH_PAGE_MASK;
    init_mpr_align_end =
        CEIL(addr_to_page(page_start, (void *)shadow.end), ARCH_PAGE_SIZE);
    rt_size_t init_mpr_size = init_mpr_align_end - init_mpr_align_start;
    rt_size_t init_mpr_npage = init_mpr_size >> ARCH_PAGE_SHIFT;

    /* find available aligned page */
    init_mpr_cont_start = _aligned_to_affinity(reg.start,
                                               (void *)init_mpr_align_start);

    rt_size_t init_mpr_cont_end = (rt_size_t)init_mpr_cont_start + init_mpr_size;
    early_offset = (rt_size_t)init_mpr_cont_start - init_mpr_align_start;
    rt_page_t mpr_cont = (void *)((char *)rt_mpr_start + early_offset);

    /* mark init mpr pages as illegal */
    rt_page_t head_cont = addr_to_page(mpr_cont, (void *)reg.start);
    rt_page_t tail_cont = addr_to_page(mpr_cont, (void *)reg.end);
    for (rt_page_t iter = head_cont; iter < tail_cont; iter++)
    {
        iter->size_bits = ARCH_ADDRESS_WIDTH_BITS;
    }

    reg.start = init_mpr_cont_end;
    _install_page(mpr_cont, reg, _early_page_insert);

    pages_alloc_handler = _early_pages_alloc;
    /* doing the page table business */
    if (rt_aspace_load_page(&rt_kernel_space, (void *)init_mpr_align_start, init_mpr_npage))
    {
        LOG_E("%s: failed to load pages", __func__);
        RT_ASSERT(0);
    }

    if (rt_hw_mmu_tbl_get() == rt_kernel_space.page_table)
        rt_page_cleanup();
}

static int _load_mpr_area(void *head, void *tail)
{
    int err = 0;
    char *iter = (char *)((rt_ubase_t)head & ~ARCH_PAGE_MASK);
    tail = (void *)CEIL(tail, ARCH_PAGE_SIZE);

    while (iter != tail)
    {
        void *paddr = rt_kmem_v2p(iter);
        if (paddr == ARCH_MAP_FAILED)
        {
            err = rt_aspace_load_page(&rt_kernel_space, iter, 1);
            if (err != RT_EOK)
            {
                LOG_E("%s: failed to load page", __func__);
                break;
            }
        }
        iter += ARCH_PAGE_SIZE;
    }
    return err;
}

static int _get_mpr_ready_n_install(rt_ubase_t inst_head, rt_ubase_t inst_end)
{
    int err;
    rt_region_t shadow;
    rt_region_t region =
    {
        .start = inst_head,
        .end = inst_end,
    };
    void *head, *tail;

    shadow.start = region.start & ~shadow_mask;
    shadow.end = CEIL(region.end, shadow_mask + 1);
    head = addr_to_page(page_start, (void *)shadow.start);
    tail = addr_to_page(page_start, (void *)shadow.end);

    err = _load_mpr_area(head, tail);

    if (err == RT_EOK)
    {
        rt_ubase_t level = rt_spin_lock_irqsave(&_pgmgr_lock);
        _install_page(rt_mpr_start, region, _page_insert);
        rt_spin_unlock_irqrestore(&_pgmgr_lock, level);
    }

    return err;
}

static void _update_region_list(struct installed_page_reg *member,
                                rt_ubase_t inst_head, rt_ubase_t inst_end,
                                rt_bitmap_t *ut_bitmap)
{
    rt_spin_lock_init(&member->lock);

    rt_spin_lock(&_inst_page_reg_lock);

    member->region_area.start = inst_head;
    member->region_area.end = inst_end;

#ifdef RT_DEBUGGING_PAGE_POISON
    member->usage_trace = ut_bitmap;
#else
    RT_UNUSED(ut_bitmap);
#endif /* RT_DEBUGGING_PAGE_POISON */

    member->next = _inst_page_reg_head;
    _inst_page_reg_head = member;

    rt_spin_unlock(&_inst_page_reg_lock);
}

#define _PAGE_STRIPE (1 << (RT_PAGE_MAX_ORDER + ARCH_PAGE_SHIFT - 1))
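
/*
 * _PAGE_STRIPE equals the size of the largest buddy block (shadow_mask + 1).
 * rt_page_install() below feeds a newly hot-plugged region to the allocator
 * in stripes of this size, so each _get_mpr_ready_n_install() call only has
 * to back a bounded window of the page record area before inserting pages.
 */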
int rt_page_install(rt_region_t region)
{
    int err = -RT_EINVAL;
    if (region.end != region.start && !(region.start & ARCH_PAGE_MASK) &&
        !(region.end & ARCH_PAGE_MASK))
    {
        rt_ubase_t inst_head = region.start;
        rt_ubase_t inst_end = region.end;
        rt_ubase_t iter = inst_head;
        int pages_count = (inst_end - inst_head) / ARCH_PAGE_SIZE;
        struct installed_page_reg *installed_pgreg =
            rt_calloc(1, sizeof(struct installed_page_reg) +
                             RT_BITMAP_LEN(pages_count) * sizeof(rt_bitmap_t));

        if (installed_pgreg)
        {
            _update_region_list(installed_pgreg, inst_head, inst_end,
                                (rt_bitmap_t *)(installed_pgreg + 1));

            if ((rt_ubase_t)iter & shadow_mask)
            {
                iter = RT_ALIGN((rt_ubase_t)inst_head, _PAGE_STRIPE);
                _get_mpr_ready_n_install(inst_head, iter < inst_end ? iter : inst_end);
            }

            for (rt_ubase_t next = iter + _PAGE_STRIPE; next < inst_end;
                 iter = next, next += _PAGE_STRIPE)
            {
                _get_mpr_ready_n_install(iter, next);
            }

            if (iter < inst_end)
            {
                _get_mpr_ready_n_install(iter, inst_end);
            }

            err = RT_EOK;
        }
    }
    return err;
}
void rt_page_cleanup(void)
{
    early_offset = 0;
    pages_alloc_handler = _pages_alloc;
}