/* dfs_pcache.c */
  1. /*
  2. * Copyright (c) 2006-2025 RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2023-05-05 RTT Implement mnt in dfs v2.0
  9. * 2023-10-23 Shell fix synchronization of data to icache
  10. */
  11. #define DBG_TAG "dfs.pcache"
  12. #define DBG_LVL DBG_WARNING
  13. #include <rtdbg.h>
  14. #include <dfs_pcache.h>
  15. #include <dfs_dentry.h>
  16. #include <dfs_mnt.h>
  17. #include <rthw.h>
  18. #ifdef RT_USING_PAGECACHE
  19. #include <mm_page.h>
  20. #include <mm_private.h>
  21. #include <mmu.h>
  22. #include <tlb.h>
  23. #ifndef RT_PAGECACHE_COUNT
  24. #define RT_PAGECACHE_COUNT 4096
  25. #endif
  26. #ifndef RT_PAGECACHE_ASPACE_COUNT
  27. #define RT_PAGECACHE_ASPACE_COUNT 1024
  28. #endif
  29. #ifndef RT_PAGECACHE_PRELOAD
  30. #define RT_PAGECACHE_PRELOAD 4
  31. #endif
  32. #ifndef RT_PAGECACHE_GC_WORK_LEVEL
  33. #define RT_PAGECACHE_GC_WORK_LEVEL 90
  34. #endif
  35. #ifndef RT_PAGECACHE_GC_STOP_LEVEL
  36. #define RT_PAGECACHE_GC_STOP_LEVEL 70
  37. #endif
  38. #define PCACHE_MQ_GC 1
  39. #define PCACHE_MQ_WB 2
/* Message payload for mmap-related requests handed to the pcache worker. */
struct dfs_aspace_mmap_obj
{
    rt_uint32_t cmd;            /* request command code */
    struct rt_mailbox *ack;     /* mailbox the worker replies to when done */
    struct dfs_file *file;      /* file the mapping belongs to */
    struct rt_varea *varea;     /* virtual memory area being mapped */
    void *data;                 /* command-specific payload */
};
/* Message payload for pcache maintenance requests (GC / write-back). */
struct dfs_pcache_mq_obj
{
    struct rt_mailbox *ack;     /* optional completion mailbox */
    rt_uint32_t cmd;            /* PCACHE_MQ_GC or PCACHE_MQ_WB */
};
/* Forward declarations for the page / aspace helpers defined below. */
static struct dfs_page *dfs_page_lookup(struct dfs_file *file, off_t pos);
static void dfs_page_ref(struct dfs_page *page);
static int dfs_page_inactive(struct dfs_page *page);
static int dfs_page_remove(struct dfs_page *page);
static void dfs_page_release(struct dfs_page *page);
static int dfs_page_dirty(struct dfs_page *page);

static int dfs_aspace_release(struct dfs_aspace *aspace);

static int dfs_aspace_lock(struct dfs_aspace *aspace);
static int dfs_aspace_unlock(struct dfs_aspace *aspace);

static int dfs_pcache_lock(void);
static int dfs_pcache_unlock(void);

/* The single global page-cache instance shared by all mounts. */
static struct dfs_pcache __pcache;
  65. /**
  66. * @brief Perform garbage collection on an address space to release pages
  67. *
  68. * This function attempts to release a specified number of pages from both inactive
  69. * and active lists of the given address space. It prioritizes releasing pages from
  70. * the inactive list first before moving to the active list.
  71. *
  72. * @param[in] aspace Pointer to the address space structure to perform GC on
  73. * @param[in] count Number of pages to attempt to release
  74. *
  75. * @return Number of pages actually released (count - remaining)
  76. */
  77. static int dfs_aspace_gc(struct dfs_aspace *aspace, int count)
  78. {
  79. int cnt = count;
  80. if (aspace)
  81. {
  82. dfs_aspace_lock(aspace);
  83. if (aspace->pages_count > 0)
  84. {
  85. struct dfs_page *page = RT_NULL;
  86. rt_list_t *node = aspace->list_inactive.next;
  87. while (cnt && node != &aspace->list_active)
  88. {
  89. page = rt_list_entry(node, struct dfs_page, space_node);
  90. node = node->next;
  91. if (dfs_page_remove(page) == 0)
  92. {
  93. cnt --;
  94. }
  95. }
  96. node = aspace->list_active.next;
  97. while (cnt && node != &aspace->list_inactive)
  98. {
  99. page = rt_list_entry(node, struct dfs_page, space_node);
  100. node = node->next;
  101. if (dfs_page_remove(page) == 0)
  102. {
  103. cnt --;
  104. }
  105. }
  106. }
  107. dfs_aspace_unlock(aspace);
  108. }
  109. return count - cnt;
  110. }
  111. /**
  112. * @brief Release page cache entries to free up memory
  113. *
  114. * This function attempts to release a specified number of page cache entries.
  115. * If count is 0, it calculates the number of pages to release based on the
  116. * current cache size and GC stop level. It first tries to release from inactive
  117. * list, then from active list if needed.
  118. *
  119. * @param[in] count Number of pages to release. If 0, calculates automatically
  120. * based on current cache size and GC stop level.
  121. *
  122. * @note The function uses LRU (Least Recently Used) policy by prioritizing
  123. * inactive list over active list.
  124. */
  125. void dfs_pcache_release(size_t count)
  126. {
  127. rt_list_t *node = RT_NULL;
  128. struct dfs_aspace *aspace = RT_NULL;
  129. dfs_pcache_lock();
  130. if (count == 0)
  131. {
  132. count = rt_atomic_load(&(__pcache.pages_count)) - RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_STOP_LEVEL / 100;
  133. }
  134. node = __pcache.list_inactive.next;
  135. while (count && node != &__pcache.list_active)
  136. {
  137. aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
  138. node = node->next;
  139. if (aspace)
  140. {
  141. count -= dfs_aspace_gc(aspace, count);
  142. dfs_aspace_release(aspace);
  143. }
  144. }
  145. node = __pcache.list_active.next;
  146. while (count && node != &__pcache.list_inactive)
  147. {
  148. aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
  149. node = node->next;
  150. if (aspace)
  151. {
  152. count -= dfs_aspace_gc(aspace, count);
  153. }
  154. }
  155. dfs_pcache_unlock();
  156. }
  157. /**
  158. * @brief Clean up page cache entries for a specific mount point
  159. *
  160. * This function iterates through both inactive and active lists of the page cache
  161. * to clean up entries associated with the given mount point. It performs cleanup
  162. * and calls the provided callback function for each matching address space.
  163. *
  164. * @param[in] mnt Pointer to the mount point structure to clean up
  165. * @param[in] cb Callback function to be called for each matching address space
  166. * The callback takes an address space pointer and returns an integer
  167. */
  168. static void _pcache_clean(struct dfs_mnt *mnt, int (*cb)(struct dfs_aspace *aspace))
  169. {
  170. rt_list_t *node = RT_NULL;
  171. struct dfs_aspace *aspace = RT_NULL;
  172. dfs_pcache_lock();
  173. node = __pcache.list_inactive.next;
  174. while (node != &__pcache.list_active)
  175. {
  176. aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
  177. node = node->next;
  178. if (aspace && aspace->mnt == mnt)
  179. {
  180. dfs_aspace_clean(aspace);
  181. cb(aspace);
  182. }
  183. }
  184. node = __pcache.list_active.next;
  185. while (node != &__pcache.list_inactive)
  186. {
  187. aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
  188. node = node->next;
  189. if (aspace && aspace->mnt == mnt)
  190. {
  191. dfs_aspace_clean(aspace);
  192. cb(aspace);
  193. }
  194. }
  195. dfs_pcache_unlock();
  196. }
/**
 * @brief Unmount and clean up page cache for a specific mount point
 *
 * This function cleans up all page cache entries associated with the given mount point
 * by calling _pcache_clean() with dfs_aspace_release as the callback function.
 * It will release all address spaces and their pages belonging to this mount point.
 *
 * @param[in] mnt Pointer to the mount point structure to be unmounted
 *
 * @note This function is typically called during filesystem unmount operation
 * @see _pcache_clean()
 */
void dfs_pcache_unmount(struct dfs_mnt *mnt)
{
    /* release callback: frees each matching aspace once its pages are gone */
    _pcache_clean(mnt, dfs_aspace_release);
}
  213. static int _dummy_cb(struct dfs_aspace *mnt)
  214. {
  215. return 0;
  216. }
/**
 * @brief Clean page cache for a specific mount point without releasing address spaces
 *
 * This function cleans up all page cache entries associated with the given mount point
 * but keeps the address spaces intact by using a dummy callback function.
 *
 * @param[in] mnt Pointer to the mount point structure to be cleaned
 *
 * @note Typical usage scenarios:
 * - Filesystem maintenance operations that require cache invalidation
 * - Force refreshing cached data without unmounting
 * - Handling external modifications to mounted filesystems
 *
 * @see _pcache_clean()
 */
void dfs_pcache_clean(struct dfs_mnt *mnt)
{
    /* no-op callback keeps the aspace structures alive after cleaning */
    _pcache_clean(mnt, _dummy_cb);
}
  236. /**
  237. * @brief Check and enforce page cache memory limit
  238. *
  239. * This function checks if the current page cache usage exceeds the working level threshold.
  240. * If exceeded, it will trigger page cache release up to 4 times to reduce cache size.
  241. *
  242. * @return Always returns 0 indicating success
  243. */
  244. static int dfs_pcache_limit_check(void)
  245. {
  246. int index = 4;
  247. while (index && rt_atomic_load(&(__pcache.pages_count)) > RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_WORK_LEVEL / 100)
  248. {
  249. dfs_pcache_release(0);
  250. index --;
  251. }
  252. return 0;
  253. }
/**
 * @brief Page cache management thread
 *
 * This is the main worker thread for page cache management. It handles:
 * - Garbage collection (GC) requests to free up memory
 * - Write-back (WB) requests to flush dirty pages to storage
 *
 * @param[in] parameter Thread parameter (unused)
 *
 * @note The thread runs in an infinite loop processing messages from the cache message queue:
 * - For GC commands: calls dfs_pcache_limit_check() to free pages when cache is full
 * - For WB commands: flushes dirty pages that have been dirty for at least 500ms
 * - Processes up to 4 dirty pages per WB command to prevent thread starvation
 */
static void dfs_pcache_thread(void *parameter)
{
    struct dfs_pcache_mq_obj work;

    while (1)
    {
        /* block until a maintenance command arrives */
        if (rt_mq_recv(__pcache.mqueue, &work, sizeof(work), RT_WAITING_FOREVER) == sizeof(work))
        {
            if (work.cmd == PCACHE_MQ_GC)
            {
                dfs_pcache_limit_check();
            }
            else if (work.cmd == PCACHE_MQ_WB)
            {
                int count = 0;
                rt_list_t *node;
                struct dfs_page *page = 0;

                while (1)
                {
                    /* try to get dirty page */
                    dfs_pcache_lock();
                    page = 0;
                    /* scan every aspace on the cache list for a dirty page;
                     * the inactive sentinel node is skipped explicitly */
                    rt_list_for_each(node, &__pcache.list_active)
                    {
                        if (node != &__pcache.list_inactive)
                        {
                            struct dfs_aspace *aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
                            dfs_aspace_lock(aspace);
                            if (aspace->list_dirty.next != &aspace->list_dirty)
                            {
                                /* take a reference so the page survives after
                                 * both locks are dropped below */
                                page = rt_list_entry(aspace->list_dirty.next, struct dfs_page, dirty_node);
                                dfs_page_ref(page);
                                dfs_aspace_unlock(aspace);
                                break;
                            }
                            else
                            {
                                page = RT_NULL;
                            }
                            dfs_aspace_unlock(aspace);
                        }
                    }
                    dfs_pcache_unlock();

                    if (page)
                    {
                        struct dfs_aspace *aspace = page->aspace;

                        dfs_aspace_lock(aspace);
                        /* re-check under the lock: the page may have been
                         * written back or the vnode detached meanwhile */
                        if (page->is_dirty == 1 && aspace->vnode)
                        {
                            /* only flush pages that stayed dirty for >= 500 ms */
                            if (rt_tick_get_millisecond() - page->tick_ms >= 500)
                            {
                                /* clamp the write length at end-of-file */
                                if (aspace->vnode->size < page->fpos + page->size)
                                {
                                    page->len = aspace->vnode->size - page->fpos;
                                }
                                else
                                {
                                    page->len = page->size;
                                }

                                if (aspace->ops->write)
                                {
                                    aspace->ops->write(page);
                                }

                                page->is_dirty = 0;

                                /* unlink from the dirty list; next == NULL marks "not linked" */
                                if (page->dirty_node.next != RT_NULL)
                                {
                                    rt_list_remove(&page->dirty_node);
                                    page->dirty_node.next = RT_NULL;
                                }
                            }
                        }
                        /* drop the reference taken during the scan */
                        dfs_page_release(page);
                        dfs_aspace_unlock(aspace);
                    }
                    else
                    {
                        break;
                    }

                    /* pace the write-back and cap at 4 pages per WB command */
                    rt_thread_mdelay(5);
                    count ++;
                    if (count >= 4)
                    {
                        break;
                    }
                }
            }
        }
    }
}
  356. /**
  357. * @brief Initialize the page cache system
  358. *
  359. * This function initializes the global page cache structure including:
  360. * - Hash table for address space lookup
  361. * - Active and inactive page lists
  362. * - Page count tracking
  363. * - Mutex for thread safety
  364. * - Message queue for cache operations
  365. * - Worker thread for background tasks
  366. *
  367. * @return 0 on success, negative error code on failure
  368. *
  369. * @note This function is automatically called during system initialization
  370. * via INIT_PREV_EXPORT macro. It sets up all necessary infrastructure
  371. * for page cache management.
  372. */
  373. static int dfs_pcache_init(void)
  374. {
  375. rt_thread_t tid;
  376. for (int i = 0; i < RT_PAGECACHE_HASH_NR; i++)
  377. {
  378. rt_list_init(&__pcache.head[i]);
  379. }
  380. rt_list_init(&__pcache.list_active);
  381. rt_list_init(&__pcache.list_inactive);
  382. rt_list_insert_after(&__pcache.list_active, &__pcache.list_inactive);
  383. rt_atomic_store(&(__pcache.pages_count), 0);
  384. rt_mutex_init(&__pcache.lock, "pcache", RT_IPC_FLAG_PRIO);
  385. __pcache.mqueue = rt_mq_create("pcache", sizeof(struct dfs_pcache_mq_obj), 1024, RT_IPC_FLAG_FIFO);
  386. tid = rt_thread_create("pcache", dfs_pcache_thread, 0, 8192, 25, 5);
  387. if (tid)
  388. {
  389. rt_thread_startup(tid);
  390. }
  391. __pcache.last_time_wb = rt_tick_get_millisecond();
  392. return 0;
  393. }
  394. INIT_PREV_EXPORT(dfs_pcache_init);
  395. /**
  396. * @brief Send a command to page cache message queue
  397. *
  398. * This function sends a command to the page cache message queue for processing
* by the cache management thread. The send uses a zero timeout, so it fails
* immediately instead of blocking when the queue is full.
  400. *
  401. * @param[in] cmd The command to send (PCACHE_MQ_GC or PCACHE_MQ_WB)
  402. *
  403. * @return RT_EOK on success, error code on failure
  404. *
  405. * @note This is used to trigger garbage collection or write-back operations
  406. * asynchronously through the cache management thread.
  407. */
  408. static rt_ubase_t dfs_pcache_mq_work(rt_uint32_t cmd)
  409. {
  410. rt_err_t err;
  411. struct dfs_pcache_mq_obj work = { 0 };
  412. work.cmd = cmd;
  413. err = rt_mq_send_wait(__pcache.mqueue, (const void *)&work, sizeof(struct dfs_pcache_mq_obj), 0);
  414. return err;
  415. }
/**
 * @brief Lock the page cache global mutex
 *
 * Blocks forever until the lock is acquired.
 *
 * @return Always returns 0.
 */
static int dfs_pcache_lock(void)
{
    rt_mutex_take(&__pcache.lock, RT_WAITING_FOREVER);
    return 0;
}
/**
 * @brief Unlock the page cache global mutex
 *
 * @return Always returns 0.
 */
static int dfs_pcache_unlock(void)
{
    rt_mutex_release(&__pcache.lock);
    return 0;
}
  436. /**
  437. * @brief Calculate hash value for address space lookup
  438. *
  439. * This function computes a hash value based on mount point and path string.
  440. * It uses a simple string hashing algorithm combined with mount point pointer.
  441. *
  442. * @param[in] mnt Pointer to the mount point structure
  443. * @param[in] path Path string to be hashed (can be NULL)
  444. *
  445. * @return Computed hash value within range [0, RT_PAGECACHE_HASH_NR-1]
  446. *
  447. * @note The hash algorithm combines:
  448. * - DJB2 hash algorithm for the path string
  449. * - XOR with mount point pointer
  450. * - Modulo operation to fit hash table size
  451. */
  452. static uint32_t dfs_aspace_hash(struct dfs_mnt *mnt, const char *path)
  453. {
  454. uint32_t val = 0;
  455. if (path)
  456. {
  457. while (*path)
  458. {
  459. val = ((val << 5) + val) + *path++;
  460. }
  461. }
  462. return (val ^ (unsigned long)mnt) & (RT_PAGECACHE_HASH_NR - 1);
  463. }
  464. /**
  465. * @brief Look up an address space in the page cache hash table
  466. *
  467. * This function searches for an address space matching the given dentry and operations
  468. * in the page cache hash table. If found, it increments the reference count of the
  469. * address space before returning it.
  470. *
  471. * @param[in] dentry Directory entry containing mount point and path information
  472. * @param[in] ops Pointer to address space operations structure
  473. *
  474. * @return Pointer to the found address space on success, NULL if not found
  475. */
  476. static struct dfs_aspace *dfs_aspace_hash_lookup(struct dfs_dentry *dentry, const struct dfs_aspace_ops *ops)
  477. {
  478. struct dfs_aspace *aspace = RT_NULL;
  479. dfs_pcache_lock();
  480. rt_list_for_each_entry(aspace, &__pcache.head[dfs_aspace_hash(dentry->mnt, dentry->pathname)], hash_node)
  481. {
  482. if (aspace->mnt == dentry->mnt
  483. && aspace->ops == ops
  484. && !strcmp(aspace->pathname, dentry->pathname))
  485. {
  486. rt_atomic_add(&aspace->ref_count, 1);
  487. dfs_pcache_unlock();
  488. return aspace;
  489. }
  490. }
  491. dfs_pcache_unlock();
  492. return RT_NULL;
  493. }
  494. /**
  495. * @brief Insert an address space into page cache
  496. *
  497. * This function inserts the given address space into both the hash table and
  498. * inactive list of the page cache. It also increments the reference count of
  499. * the address space.
  500. *
  501. * @param[in,out] aspace Pointer to the address space to be inserted
  502. */
  503. static void dfs_aspace_insert(struct dfs_aspace *aspace)
  504. {
  505. uint32_t val = 0;
  506. val = dfs_aspace_hash(aspace->mnt, aspace->pathname);
  507. dfs_pcache_lock();
  508. rt_atomic_add(&aspace->ref_count, 1);
  509. rt_list_insert_after(&__pcache.head[val], &aspace->hash_node);
  510. rt_list_insert_before(&__pcache.list_inactive, &aspace->cache_node);
  511. dfs_pcache_unlock();
  512. }
  513. /**
  514. * @brief Remove an address space from page cache
  515. *
  516. * This function removes the given address space from both the hash table and
  517. * active/inactive lists of the page cache.
  518. *
  519. * @param[in,out] aspace Pointer to the address space to be removed
  520. */
  521. static void dfs_aspace_remove(struct dfs_aspace *aspace)
  522. {
  523. dfs_pcache_lock();
  524. if (aspace->hash_node.next != RT_NULL)
  525. {
  526. rt_list_remove(&aspace->hash_node);
  527. }
  528. if (aspace->cache_node.next != RT_NULL)
  529. {
  530. rt_list_remove(&aspace->cache_node);
  531. }
  532. dfs_pcache_unlock();
  533. }
  534. /**
  535. * @brief Move an address space to active list
  536. *
  537. * This function moves the specified address space from its current position
  538. * to the active list in the page cache. The active list contains frequently
  539. * accessed address spaces.
  540. *
  541. * @param[in,out] aspace Pointer to the address space to be activated
  542. *
  543. * @note Insert the address space before inactive list's head, means putting it
  544. * to the end of the active list.
  545. *
  546. * @see dfs_aspace_inactive() for the opposite operation
  547. */
  548. static void dfs_aspace_active(struct dfs_aspace *aspace)
  549. {
  550. dfs_pcache_lock();
  551. if (aspace->cache_node.next != RT_NULL)
  552. {
  553. rt_list_remove(&aspace->cache_node);
  554. rt_list_insert_before(&__pcache.list_inactive, &aspace->cache_node);
  555. }
  556. dfs_pcache_unlock();
  557. }
  558. /**
  559. * @brief Move an address space to inactive list
  560. *
  561. * This function moves the specified address space from its current position
  562. * to the inactive list in the page cache. The inactive list contains less
  563. * frequently accessed address spaces that are candidates for eviction.
  564. *
  565. * @param[in,out] aspace Pointer to the address space to be deactivated
  566. */
  567. static void dfs_aspace_inactive(struct dfs_aspace *aspace)
  568. {
  569. dfs_pcache_lock();
  570. if (aspace->cache_node.next != RT_NULL)
  571. {
  572. rt_list_remove(&aspace->cache_node);
  573. rt_list_insert_before(&__pcache.list_active, &aspace->cache_node);
  574. }
  575. dfs_pcache_unlock();
  576. }
  577. /**
  578. * @brief Internal function to create a new address space for page cache
  579. *
  580. * This function allocates and initializes a new address space structure for page caching.
  581. * It sets up all necessary lists, locks, and initial values for the address space.
  582. *
  583. * @param[in] dentry Directory entry containing mount point and path information (can be NULL)
  584. * @param[in] vnode Pointer to the vnode structure this address space will be associated with
  585. * @param[in] ops Pointer to address space operations structure
  586. *
  587. * @return Pointer to the newly created address space on success, NULL on failure
  588. *
  589. * @note The created address space will be automatically inserted into the page cache
  590. * @see dfs_aspace_create() for the public interface to create address spaces
  591. */
  592. static struct dfs_aspace *_dfs_aspace_create(struct dfs_dentry *dentry,
  593. struct dfs_vnode *vnode,
  594. const struct dfs_aspace_ops *ops)
  595. {
  596. struct dfs_aspace *aspace;
  597. aspace = rt_calloc(1, sizeof(struct dfs_aspace));
  598. if (aspace)
  599. {
  600. rt_list_init(&aspace->list_active);
  601. rt_list_init(&aspace->list_inactive);
  602. rt_list_init(&aspace->list_dirty);
  603. rt_list_insert_after(&aspace->list_active, &aspace->list_inactive);
  604. aspace->avl_root.root_node = 0;
  605. aspace->avl_page = 0;
  606. rt_mutex_init(&aspace->lock, rt_thread_self()->parent.name, RT_IPC_FLAG_PRIO);
  607. rt_atomic_store(&aspace->ref_count, 1);
  608. aspace->pages_count = 0;
  609. aspace->vnode = vnode;
  610. aspace->ops = ops;
  611. if (dentry && dentry->mnt)
  612. {
  613. aspace->mnt = dentry->mnt;
  614. aspace->fullpath = rt_strdup(dentry->mnt->fullpath);
  615. aspace->pathname = rt_strdup(dentry->pathname);
  616. }
  617. dfs_aspace_insert(aspace);
  618. }
  619. return aspace;
  620. }
  621. /**
  622. * @brief Create or lookup an address space for page caching
  623. *
  624. * This function either creates a new address space or looks up an existing one
  625. * in the page cache hash table. If found, it updates the vnode reference and
  626. * activates the address space.
  627. *
  628. * @param[in] dentry Directory entry containing mount point and path info (can be NULL)
  629. * @param[in] vnode Pointer to the vnode structure to associate with
  630. * @param[in] ops Pointer to address space operations structure
  631. *
  632. * @return Pointer to the found/created address space on success, NULL on failure
  633. */
  634. struct dfs_aspace *dfs_aspace_create(struct dfs_dentry *dentry,
  635. struct dfs_vnode *vnode,
  636. const struct dfs_aspace_ops *ops)
  637. {
  638. struct dfs_aspace *aspace = RT_NULL;
  639. RT_ASSERT(vnode && ops);
  640. dfs_pcache_lock();
  641. if (dentry)
  642. {
  643. aspace = dfs_aspace_hash_lookup(dentry, ops);
  644. }
  645. if (!aspace)
  646. {
  647. aspace = _dfs_aspace_create(dentry, vnode, ops);
  648. }
  649. else
  650. {
  651. aspace->vnode = vnode;
  652. dfs_aspace_active(aspace);
  653. }
  654. dfs_pcache_unlock();
  655. return aspace;
  656. }
  657. /**
  658. * @brief Destroy an address space and release its resources
  659. *
  660. * This function decrements the reference count of the address space and marks it as inactive.
  661. * If the reference count reaches 1 and there are no pages left, it will be fully released.
  662. *
  663. * @param[in] aspace Pointer to the address space to be destroyed
  664. *
  665. * @return 0 on successful release, -EINVAL if aspace is NULL
  666. */
  667. int dfs_aspace_destroy(struct dfs_aspace *aspace)
  668. {
  669. int ret = -EINVAL;
  670. if (aspace)
  671. {
  672. dfs_pcache_lock();
  673. dfs_aspace_lock(aspace);
  674. rt_atomic_sub(&aspace->ref_count, 1);
  675. RT_ASSERT(rt_atomic_load(&aspace->ref_count) > 0);
  676. dfs_aspace_inactive(aspace);
  677. aspace->vnode = RT_NULL;
  678. if (dfs_aspace_release(aspace) != 0)
  679. {
  680. dfs_aspace_unlock(aspace);
  681. }
  682. dfs_pcache_unlock();
  683. }
  684. return ret;
  685. }
/**
 * @brief Release an address space when its reference count reaches 1
 *
 * This function checks if the address space can be safely released by verifying:
 * - Reference count is 1 (only caller holds reference)
 * - No pages remain in the address space
 * If conditions are met, it removes the space from cache and frees all resources.
 *
 * @param[in] aspace Pointer to the address space to be released
 *
 * @return 0 on successful release, -1 if space cannot be released yet
 *
 * @note On the success path the aspace mutex is destroyed together with the
 *       aspace, so dfs_aspace_unlock() is deliberately NOT called — callers
 *       holding the aspace lock must skip their own unlock when this
 *       function returns 0 (see dfs_aspace_destroy()).
 */
static int dfs_aspace_release(struct dfs_aspace *aspace)
{
    int ret = -1;

    if (aspace)
    {
        dfs_pcache_lock();
        dfs_aspace_lock(aspace);

        /* releasable only when the cache holds the last reference
         * and no pages remain attached */
        if (rt_atomic_load(&aspace->ref_count) == 1 && aspace->pages_count == 0)
        {
            dfs_aspace_remove(aspace);
            if (aspace->fullpath)
            {
                rt_free(aspace->fullpath);
            }
            if (aspace->pathname)
            {
                rt_free(aspace->pathname);
            }
            /* destroys the (still locked) mutex along with the aspace */
            rt_mutex_detach(&aspace->lock);
            rt_free(aspace);
            ret = 0;
        }
        else
        {
            dfs_aspace_unlock(aspace);
        }

        dfs_pcache_unlock();
    }

    return ret;
}
  728. /**
  729. * @brief Dump address space page information for debugging
  730. *
  731. * This function prints detailed information about pages in the given address space.
  732. * It can optionally filter to show only dirty pages or all pages.
  733. *
  734. * @param[in] aspace Pointer to the address space to dump
  735. * @param[in] is_dirty Flag indicating whether to show only dirty pages (1) or all pages (0)
  736. *
  737. * @return Always returns 0
  738. */
  739. static int _dfs_aspace_dump(struct dfs_aspace *aspace, int is_dirty)
  740. {
  741. if (aspace)
  742. {
  743. rt_list_t *next;
  744. struct dfs_page *page;
  745. dfs_aspace_lock(aspace);
  746. if (aspace->pages_count > 0)
  747. {
  748. rt_list_for_each(next, &aspace->list_inactive)
  749. {
  750. if (next != &aspace->list_active)
  751. {
  752. page = rt_list_entry(next, struct dfs_page, space_node);
  753. if (is_dirty && page->is_dirty)
  754. {
  755. rt_kprintf(" pages >> fpos: %d index :%d is_dirty: %d\n", page->fpos, page->fpos / ARCH_PAGE_SIZE, page->is_dirty);
  756. }
  757. else if (is_dirty == 0)
  758. {
  759. rt_kprintf(" pages >> fpos: %d index :%d is_dirty: %d\n", page->fpos, page->fpos / ARCH_PAGE_SIZE, page->is_dirty);
  760. }
  761. }
  762. }
  763. }
  764. else
  765. {
  766. rt_kprintf(" pages >> empty\n");
  767. }
  768. dfs_aspace_unlock(aspace);
  769. }
  770. return 0;
  771. }
  772. /**
  773. * @brief Dump page cache information for debugging purposes
  774. *
  775. * This function prints detailed information about the page cache, including:
  776. * - Total page count and capacity
  777. * - File paths and page counts for each address space
  778. * - Optional detailed page information (with --dump or --dirty flags)
  779. *
  780. * @param[in] argc Number of command line arguments
  781. * @param[in] argv Command line arguments array
  782. *
  783. * @return Always returns 0
  784. *
  785. * @see _dfs_aspace_dump() for the actual page dumping implementation
  786. */
  787. static int dfs_pcache_dump(int argc, char **argv)
  788. {
  789. int dump = 0;
  790. rt_list_t *node;
  791. struct dfs_aspace *aspace;
  792. if (argc == 2)
  793. {
  794. if (strcmp(argv[1], "--dump") == 0)
  795. {
  796. dump = 1;
  797. }
  798. else if (strcmp(argv[1], "--dirty") == 0)
  799. {
  800. dump = 2;
  801. }
  802. else
  803. {
  804. rt_kprintf("dfs page cache dump\n");
  805. rt_kprintf("usage: dfs_cache\n");
  806. rt_kprintf(" dfs_cache --dump\n");
  807. rt_kprintf(" dfs_cache --dirty\n");
  808. return 0;
  809. }
  810. }
  811. dfs_pcache_lock();
  812. rt_kprintf("total pages count: %d / %d\n", rt_atomic_load(&(__pcache.pages_count)), RT_PAGECACHE_COUNT);
  813. rt_list_for_each(node, &__pcache.list_active)
  814. {
  815. if (node != &__pcache.list_inactive)
  816. {
  817. aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
  818. if (aspace->mnt)
  819. {
  820. rt_kprintf("file: %s%s pages: %d\n", aspace->fullpath, aspace->pathname, aspace->pages_count);
  821. }
  822. else
  823. {
  824. rt_kprintf("unknown type, pages: %d\n", aspace->pages_count);
  825. }
  826. if (dump > 0)
  827. {
  828. _dfs_aspace_dump(aspace, dump == 2 ? 1 : 0);
  829. }
  830. }
  831. }
  832. dfs_pcache_unlock();
  833. return 0;
  834. }
  835. MSH_CMD_EXPORT_ALIAS(dfs_pcache_dump, dfs_cache, dump dfs page cache);
/**
 * @brief Unmap all memory mappings for a page
 *
 * This function unmaps all virtual memory areas that have mapped this physical
 * page and frees the per-mapping bookkeeping structures. If the page had at
 * least one mapping and lies within the file size, it is marked dirty first,
 * since a mapped page may have been written through the mapping.
 *
 * @param[in,out] page Pointer to the page structure to unmap
 *
 * @return Always returns 0
 */
static int dfs_page_unmap(struct dfs_page *page)
{
    rt_list_t *next;
    struct dfs_mmap *map;
    next = page->mmap_head.next;
    /* any mapping may have dirtied the page; only pages inside the current
     * file size are worth writing back */
    if (next != &page->mmap_head && page->fpos < page->aspace->vnode->size)
    {
        dfs_page_dirty(page);
    }
    while (next != &page->mmap_head)
    {
        map = rt_list_entry(next, struct dfs_mmap, mmap_node);
        /* advance before the entry is freed below */
        next = next->next;
        if (map)
        {
            rt_varea_t varea;
            void *vaddr;
            varea = rt_aspace_query(map->aspace, map->vaddr);
            RT_ASSERT(varea);
            /* translate the page's file position into the varea's address */
            vaddr = dfs_aspace_vaddr(varea, page->fpos);
            rt_varea_unmap_page(varea, vaddr);
            rt_free(map);
        }
    }
    /* all mmap records consumed: reset the list head to empty */
    rt_list_init(&page->mmap_head);
    return 0;
}
  873. /**
  874. * @brief Create a new page structure for page cache
  875. *
  876. * This function allocates and initializes a new page structure for the page cache.
  877. * It allocates physical memory for the page and initializes its metadata including:
  878. * - Memory mapping list head
  879. * - Reference count
  880. * - Physical page allocation with affinity hint
  881. *
  882. * @param[in] pos File position used to determine page allocation affinity
  883. *
  884. * @return Pointer to the newly created page structure on success, NULL on failure
  885. */
  886. static struct dfs_page *dfs_page_create(off_t pos)
  887. {
  888. struct dfs_page *page = RT_NULL;
  889. int affid = RT_PAGE_PICK_AFFID(pos);
  890. page = rt_calloc(1, sizeof(struct dfs_page));
  891. if (page)
  892. {
  893. page->page = rt_pages_alloc_tagged(0, affid, PAGE_ANY_AVAILABLE);
  894. if (page->page)
  895. {
  896. /* memset(page->page, 0x00, ARCH_PAGE_SIZE); */
  897. rt_list_init(&page->mmap_head);
  898. rt_atomic_store(&(page->ref_count), 1);
  899. }
  900. else
  901. {
  902. LOG_E("page alloc failed!\n");
  903. rt_free(page);
  904. page = RT_NULL;
  905. }
  906. }
  907. return page;
  908. }
/**
 * @brief Increment the reference count of a page
 *
 * Atomically takes one additional reference on the page; the holder must drop
 * it again with dfs_page_release().
 *
 * @param[in,out] page Pointer to the page structure whose reference count will be incremented
 */
static void dfs_page_ref(struct dfs_page *page)
{
    rt_atomic_add(&(page->ref_count), 1);
}
/**
 * @brief Release a page from page cache when reference count reaches zero
 *
 * This function decrements the reference count of a page and performs cleanup
 * when the count reaches zero. It handles:
 * - Unmapping all virtual mappings of the page
 * - Writing back dirty content to storage (clamped to the file size)
 * - Freeing the physical frame and the page structure
 *
 * @param[in,out] page Pointer to the page structure to be released
 */
static void dfs_page_release(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;
    /* serialize against other users of the same address space */
    dfs_aspace_lock(aspace);
    rt_atomic_sub(&(page->ref_count), 1);
    if (rt_atomic_load(&(page->ref_count)) == 0)
    {
        dfs_page_unmap(page);
        /* write back dirty data; skipped if the vnode is already detached */
        if (page->is_dirty == 1 && aspace->vnode)
        {
            /* the last page of a file may only be partially valid */
            if (aspace->vnode->size < page->fpos + page->size)
            {
                page->len = aspace->vnode->size - page->fpos;
            }
            else
            {
                page->len = page->size;
            }
            if (aspace->ops->write)
            {
                aspace->ops->write(page);
            }
            page->is_dirty = 0;
        }
        RT_ASSERT(page->is_dirty == 0);
        rt_pages_free(page->page, 0);
        page->page = RT_NULL;
        rt_free(page);
    }
    dfs_aspace_unlock(aspace);
}
  963. /**
  964. * @brief Compare file positions for page alignment
  965. *
  966. * This function compares two file positions to determine if they belong to the same page.
  967. * It aligns both positions to page boundaries before comparison.
  968. *
  969. * @param[in] fpos File position to compare (byte offset)
  970. * @param[in] value Reference file position to compare against (byte offset)
  971. *
  972. * @return 0 if positions are in the same page, negative if fpos is before value,
  973. * positive if fpos is after value
  974. */
  975. static int dfs_page_compare(off_t fpos, off_t value)
  976. {
  977. return fpos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE - value;
  978. }
/**
 * @brief Insert a page into the AVL tree of an address space
 *
 * This function inserts a page into the AVL tree of the specified address space.
 * The tree is ordered by the file position (fpos) of pages. If a page with the
 * same fpos already exists, the insertion fails.
 *
 * @param[in] aspace Pointer to the address space containing the AVL tree
 * @param[in,out] page Pointer to the page structure to be inserted
 *
 * @return 0 on successful insertion, -1 if a page with same fpos already exists
 *
 * @note The function:
 * - Maintains AVL tree balance after insertion
 * - Updates the aspace's avl_page pointer (last-hit cache) to the new page
 * - Uses file position (fpos) as the ordering key
 */
static int _dfs_page_insert(struct dfs_aspace *aspace, struct dfs_page *page)
{
    struct dfs_page *tmp;
    struct util_avl_struct *current = NULL;
    struct util_avl_struct **next = &(aspace->avl_root.root_node);
    /* Figure out where to put new node: standard BST descent on fpos */
    while (*next)
    {
        current = *next;
        tmp = rt_container_of(current, struct dfs_page, avl_node);
        if (page->fpos < tmp->fpos)
            next = &(current->avl_left);
        else if (page->fpos > tmp->fpos)
            next = &(current->avl_right);
        else
            return -1;  /* duplicate fpos: refuse insertion */
    }
    /* Add new node and rebalance tree. */
    util_avl_link(&page->avl_node, current, next);
    util_avl_rebalance(current, &aspace->avl_root);
    /* remember the most recently inserted page for the fast-path lookup */
    aspace->avl_page = page;
    return 0;
}
  1019. /**
  1020. * @brief Remove a page from the AVL tree of an address space
  1021. *
  1022. * This function removes a page from the AVL tree of the specified address space.
  1023. * It also clears the cached AVL page pointer if it points to the page being removed.
  1024. *
  1025. * @param[in,out] aspace Pointer to the address space containing the AVL tree
  1026. * @param[in,out] page Pointer to the page structure to be removed
  1027. */
  1028. static void _dfs_page_remove(struct dfs_aspace *aspace, struct dfs_page *page)
  1029. {
  1030. if (aspace->avl_page && aspace->avl_page == page)
  1031. {
  1032. aspace->avl_page = 0;
  1033. }
  1034. util_avl_remove(&page->avl_node, &aspace->avl_root);
  1035. }
/**
 * @brief Lock an address space for thread-safe operations
 *
 * Blocks until the per-aspace mutex is acquired.
 *
 * @param[in,out] aspace Pointer to the address space structure to be locked
 *
 * @return Always returns 0 indicating success
 *
 * @note The lock must be released using dfs_aspace_unlock()
 * @see dfs_aspace_unlock()
 */
static int dfs_aspace_lock(struct dfs_aspace *aspace)
{
    rt_mutex_take(&aspace->lock, RT_WAITING_FOREVER);
    return 0;
}
/**
 * @brief Unlock an address space after thread-safe operations
 *
 * Releases the per-aspace mutex taken by dfs_aspace_lock().
 *
 * @param[in,out] aspace Pointer to the address space structure to be unlocked
 *
 * @return Always returns 0 indicating success
 *
 * @note Must be called after dfs_aspace_lock() to release the lock
 * @see dfs_aspace_lock()
 */
static int dfs_aspace_unlock(struct dfs_aspace *aspace)
{
    rt_mutex_release(&aspace->lock);
    return 0;
}
/**
 * @brief Insert a page into the address space's page cache
 *
 * This function inserts a page into the active portion of the address space's
 * page list and into its AVL tree, then updates the per-aspace and global page
 * counters. If the aspace exceeds its per-aspace capacity, the oldest active
 * page is demoted to the inactive portion of the list.
 *
 * @param[in] page Pointer to the page structure to be inserted
 *
 * @return Always returns 0 indicating success
 */
static int dfs_page_insert(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;
    dfs_aspace_lock(aspace);
    /* inserting just before the inactive sentinel places the page at the
     * tail of the active segment of the shared ring */
    rt_list_insert_before(&aspace->list_inactive, &page->space_node);
    aspace->pages_count ++;
    /* duplicate fpos here would indicate cache corruption */
    if (_dfs_page_insert(aspace, page))
    {
        RT_ASSERT(0);
    }
    if (aspace->pages_count > RT_PAGECACHE_ASPACE_COUNT)
    {
        /* demote the oldest active page (head of the active segment),
         * unless the segment is empty */
        rt_list_t *next = aspace->list_active.next;
        if (next != &aspace->list_inactive)
        {
            struct dfs_page *tmp = rt_list_entry(next, struct dfs_page, space_node);
            dfs_page_inactive(tmp);
        }
    }
    rt_atomic_add(&(__pcache.pages_count), 1);
    dfs_aspace_unlock(aspace);
    return 0;
}
/**
 * @brief Remove a page from the address space's page cache
 *
 * This function removes a page from both the space list and the dirty list of
 * the address space, but only when the cache holds the sole reference
 * (ref_count == 1). It then drops that last reference, which frees the page.
 *
 * @param[in] page Pointer to the page structure to be removed
 *
 * @return 0 if the page was successfully removed, -1 if the page is still referenced
 */
static int dfs_page_remove(struct dfs_page *page)
{
    int ret = -1;
    struct dfs_aspace *aspace = page->aspace;
    dfs_aspace_lock(aspace);
    /* only the cache's own reference may remain; otherwise leave the page */
    if (rt_atomic_load(&(page->ref_count)) == 1)
    {
        /* a NULL next pointer marks "not linked" for both list nodes */
        if (page->space_node.next != RT_NULL)
        {
            rt_list_remove(&page->space_node);
            page->space_node.next = RT_NULL;
            aspace->pages_count--;
            _dfs_page_remove(aspace, page);
        }
        if (page->dirty_node.next != RT_NULL)
        {
            rt_list_remove(&page->dirty_node);
            page->dirty_node.next = RT_NULL;
        }
        rt_atomic_sub(&(__pcache.pages_count), 1);
        /* drops the final reference: frees the page (and writes back if dirty) */
        dfs_page_release(page);
        ret = 0;
    }
    dfs_aspace_unlock(aspace);
    return ret;
}
  1135. /**
  1136. * @brief Move a page to active list
  1137. *
  1138. * This function moves a page to the active list
  1139. * within its associated address space.
  1140. *
  1141. * @param[in] page The page to be moved to active list
  1142. * @return int Always returns 0 on success
  1143. */
  1144. static int dfs_page_active(struct dfs_page *page)
  1145. {
  1146. struct dfs_aspace *aspace = page->aspace;
  1147. dfs_aspace_lock(aspace);
  1148. if (page->space_node.next != RT_NULL)
  1149. {
  1150. rt_list_remove(&page->space_node);
  1151. rt_list_insert_before(&aspace->list_inactive, &page->space_node);
  1152. }
  1153. dfs_aspace_unlock(aspace);
  1154. return 0;
  1155. }
  1156. /**
  1157. * @brief Move a page to inactive list
  1158. *
  1159. * This function moves a page to the inactive list
  1160. * within its associated address space.
  1161. *
  1162. * @param[in] page The page to be moved to inactive list
  1163. * @return int Always returns 0 on success
  1164. */
  1165. static int dfs_page_inactive(struct dfs_page *page)
  1166. {
  1167. struct dfs_aspace *aspace = page->aspace;
  1168. dfs_aspace_lock(aspace);
  1169. if (page->space_node.next != RT_NULL)
  1170. {
  1171. rt_list_remove(&page->space_node);
  1172. rt_list_insert_before(&aspace->list_active, &page->space_node);
  1173. }
  1174. dfs_aspace_unlock(aspace);
  1175. return 0;
  1176. }
/**
 * @brief Mark a page as dirty and manage dirty list
 *
 * This function marks a page as dirty and links it onto the address space's
 * dirty list if it is not already there (and still on the space list). It also
 * kicks the asynchronous write-back worker at most once per second.
 *
 * @param[in] page The page to be marked as dirty
 * @return int Always returns 0 on success
 */
static int dfs_page_dirty(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;
    dfs_aspace_lock(aspace);
    /* link into the dirty list only if not already linked and the page is
     * still part of the space list (NULL next == unlinked) */
    if (page->dirty_node.next == RT_NULL && page->space_node.next != RT_NULL)
    {
        rt_list_insert_before(&aspace->list_dirty, &page->dirty_node);
    }
    page->is_dirty = 1;
    page->tick_ms = rt_tick_get_millisecond();
    /* throttle write-back requests to at most one per second */
    if (rt_tick_get_millisecond() - __pcache.last_time_wb >= 1000)
    {
        dfs_pcache_mq_work(PCACHE_MQ_WB);
        __pcache.last_time_wb = rt_tick_get_millisecond();
    }
    dfs_aspace_unlock(aspace);
    return 0;
}
  1204. /**
  1205. * @brief Search for a page in the address space AVL tree
  1206. *
  1207. * This function searches for a page at the specified file position in the address space's AVL tree.
  1208. * If found, it marks the page as active and increments its reference count.
  1209. *
  1210. * @param[in] aspace The address space to search in
  1211. * @param[in] fpos The file position to search for
  1212. * @return struct dfs_page* The found page, or RT_NULL if not found
  1213. */
  1214. static struct dfs_page *dfs_page_search(struct dfs_aspace *aspace, off_t fpos)
  1215. {
  1216. int cmp;
  1217. struct dfs_page *page;
  1218. struct util_avl_struct *avl_node;
  1219. dfs_aspace_lock(aspace);
  1220. if (aspace->avl_page && dfs_page_compare(fpos, aspace->avl_page->fpos) == 0)
  1221. {
  1222. page = aspace->avl_page;
  1223. dfs_page_active(page);
  1224. dfs_page_ref(page);
  1225. dfs_aspace_unlock(aspace);
  1226. return page;
  1227. }
  1228. avl_node = aspace->avl_root.root_node;
  1229. while (avl_node)
  1230. {
  1231. page = rt_container_of(avl_node, struct dfs_page, avl_node);
  1232. cmp = dfs_page_compare(fpos, page->fpos);
  1233. if (cmp < 0)
  1234. {
  1235. avl_node = avl_node->avl_left;
  1236. }
  1237. else if (cmp > 0)
  1238. {
  1239. avl_node = avl_node->avl_right;
  1240. }
  1241. else
  1242. {
  1243. aspace->avl_page = page;
  1244. dfs_page_active(page);
  1245. dfs_page_ref(page);
  1246. dfs_aspace_unlock(aspace);
  1247. return page;
  1248. }
  1249. }
  1250. dfs_aspace_unlock(aspace);
  1251. return RT_NULL;
  1252. }
  1253. /**
  1254. * @brief Load a page from file into address space cache
  1255. *
  1256. * This function creates a new page cache entry for the specified file position,
  1257. * reads the content from the file into the page, and inserts it into the cache.
  1258. * The page's reference count is incremented to prevent c eviction.
  1259. *
  1260. * @param[in] file Pointer to the file structure containing the vnode and aspace
  1261. * @param[in] pos File position to load (will be page-aligned)
  1262. *
  1263. * @return Pointer to the newly created and loaded page on success,
  1264. * NULL on failure or invalid parameters
  1265. */
  1266. static struct dfs_page *dfs_aspace_load_page(struct dfs_file *file, off_t pos)
  1267. {
  1268. struct dfs_page *page = RT_NULL;
  1269. if (file && file->vnode && file->vnode->aspace)
  1270. {
  1271. struct dfs_vnode *vnode = file->vnode;
  1272. struct dfs_aspace *aspace = vnode->aspace;
  1273. page = dfs_page_create(pos);
  1274. if (page)
  1275. {
  1276. page->aspace = aspace;
  1277. page->size = ARCH_PAGE_SIZE;
  1278. page->fpos = RT_ALIGN_DOWN(pos, ARCH_PAGE_SIZE);
  1279. aspace->ops->read(file, page);
  1280. page->ref_count ++;
  1281. dfs_page_insert(page);
  1282. }
  1283. }
  1284. return page;
  1285. }
/**
 * @brief Look up a page in the cache and load it if not found
 *
 * This function searches for a page at the specified position in the file's
 * address space. On a miss it loads the missing page and sequentially preloads
 * up to RT_PAGECACHE_PRELOAD further pages (stopping early at the first page
 * already cached or at a load failure). It also triggers synchronous or
 * asynchronous garbage collection depending on global cache pressure.
 *
 * @param[in] file Pointer to the file structure containing the vnode and aspace
 * @param[in] pos File position to look up (will be page-aligned)
 *
 * @return Pointer to the found or newly loaded page (with a reference held
 *         for the caller) on success, NULL if the page couldn't be loaded
 */
static struct dfs_page *dfs_page_lookup(struct dfs_file *file, off_t pos)
{
    struct dfs_page *page = RT_NULL;
    struct dfs_aspace *aspace = file->vnode->aspace;
    dfs_aspace_lock(aspace);
    page = dfs_page_search(aspace, pos);
    if (!page)
    {
        int count = RT_PAGECACHE_PRELOAD;
        struct dfs_page *tmp = RT_NULL;
        /* align the miss position down to its page boundary */
        off_t fpos = pos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE;
        do
        {
            page = dfs_aspace_load_page(file, fpos);
            if (page)
            {
                if (tmp == RT_NULL)
                {
                    /* first loaded page is the one returned to the caller;
                     * keep its reference */
                    tmp = page;
                }
                else
                {
                    /* preloaded pages stay cached but we drop our reference */
                    dfs_page_release(page);
                }
            }
            else
            {
                break;
            }
            fpos += ARCH_PAGE_SIZE;
            /* stop preloading as soon as the next page is already cached */
            page = dfs_page_search(aspace, fpos);
            if (page)
            {
                dfs_page_release(page);
            }
            count --;
        } while (count && page == RT_NULL);
        page = tmp;
        if (page)
        {
            /* unlock before the (potentially blocking) cache-pressure work */
            dfs_aspace_unlock(aspace);
            if (rt_atomic_load(&(__pcache.pages_count)) >= RT_PAGECACHE_COUNT)
            {
                /* hard limit reached: reclaim synchronously */
                dfs_pcache_limit_check();
            }
            else if (rt_atomic_load(&(__pcache.pages_count)) >= RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_WORK_LEVEL / 100)
            {
                /* soft level reached: kick the asynchronous GC worker */
                dfs_pcache_mq_work(PCACHE_MQ_GC);
            }
            return page;
        }
    }
    dfs_aspace_unlock(aspace);
    return page;
}
/**
 * @brief Read data from file through address space page cache
 *
 * This function reads data from a file using its address space page cache. It
 * looks up (or loads) the page covering the current position, copies out the
 * bytes available in that page, and repeats until the request is satisfied,
 * a page cannot be obtained, or end of file is reached.
 *
 * @param[in] file Pointer to the file structure containing vnode and aspace
 * @param[in] buf Buffer to store the read data
 * @param[in] count Number of bytes to read
 * @param[in,out] pos Pointer to the file position (updated during reading)
 *
 * @return Number of bytes successfully read, or -EINVAL on invalid arguments
 *         or when the aspace has no read operation
 */
int dfs_aspace_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
{
    int ret = -EINVAL;
    if (file && file->vnode && file->vnode->aspace)
    {
        /* cannot serve cached reads without a backing read operation */
        if (!(file->vnode->aspace->ops->read))
            return ret;
        struct dfs_vnode *vnode = file->vnode;
        struct dfs_aspace *aspace = vnode->aspace;
        struct dfs_page *page;
        char *ptr = (char *)buf;
        ret = 0;
        while (count)
        {
            page = dfs_page_lookup(file, *pos);
            if (page)
            {
                off_t len;
                dfs_aspace_lock(aspace);
                /* bytes available in this page: clamp to file size on the
                 * last page, otherwise to the page boundary */
                if (aspace->vnode->size < page->fpos + ARCH_PAGE_SIZE)
                {
                    len = aspace->vnode->size - *pos;
                }
                else
                {
                    len = page->fpos + ARCH_PAGE_SIZE - *pos;
                }
                /* never copy more than the caller asked for */
                len = count > len ? len : count;
                if (len > 0)
                {
                    rt_memcpy(ptr, page->page + *pos - page->fpos, len);
                    ptr += len;
                    *pos += len;
                    count -= len;
                    ret += len;
                }
                else
                {
                    /* at or past end of file: stop reading */
                    dfs_page_release(page);
                    dfs_aspace_unlock(aspace);
                    break;
                }
                dfs_page_release(page);
                dfs_aspace_unlock(aspace);
            }
            else
            {
                break;
            }
        }
    }
    return ret;
}
/**
 * @brief Write data to file through address space page cache
 *
 * This function writes data to a file using its address space page cache. It
 * looks up (or loads) the page covering the current position, copies data into
 * it, extends the file size when writing past the end, and either writes the
 * page through immediately (O_SYNC) or marks it dirty for later write-back.
 *
 * @param[in] file Pointer to the file structure containing vnode and aspace
 * @param[in] buf Buffer containing data to write
 * @param[in] count Number of bytes to write
 * @param[in,out] pos Pointer to the file position (updated during writing)
 *
 * @return Number of bytes successfully written, -EINVAL when arguments are
 *         invalid or no write operation exists, -EROFS on a read-only mount
 */
int dfs_aspace_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
{
    int ret = -EINVAL;
    if (file && file->vnode && file->vnode->aspace)
    {
        struct dfs_vnode *vnode = file->vnode;
        struct dfs_aspace *aspace = vnode->aspace;
        struct dfs_page *page;
        char *ptr = (char *)buf;
        if (!(aspace->ops->write))
        {
            return ret;
        }
        else if (aspace->mnt && (aspace->mnt->flags & MNT_RDONLY))
        {
            /* refuse writes on read-only mounts */
            return -EROFS;
        }
        ret = 0;
        while (count)
        {
            page = dfs_page_lookup(file, *pos);
            if (page)
            {
                off_t len;
                dfs_aspace_lock(aspace);
                /* bytes writable within this page, capped by the request */
                len = page->fpos + ARCH_PAGE_SIZE - *pos;
                len = count > len ? len : count;
                rt_memcpy(page->page + *pos - page->fpos, ptr, len);
                ptr += len;
                *pos += len;
                count -= len;
                ret += len;
                /* writing past the end extends the file */
                if (*pos > aspace->vnode->size)
                {
                    aspace->vnode->size = *pos;
                }
                if (file->flags & O_SYNC)
                {
                    /* synchronous write-through: clamp the valid length on
                     * the last page, then push to storage immediately */
                    if (aspace->vnode->size < page->fpos + page->size)
                    {
                        page->len = aspace->vnode->size - page->fpos;
                    }
                    else
                    {
                        page->len = page->size;
                    }
                    aspace->ops->write(page);
                    page->is_dirty = 0;
                }
                else
                {
                    /* deferred write-back */
                    dfs_page_dirty(page);
                }
                dfs_page_release(page);
                dfs_aspace_unlock(aspace);
            }
            else
            {
                break;
            }
        }
    }
    return ret;
}
/**
 * @brief Flush dirty pages in an address space to storage
 *
 * This function writes all dirty pages in the specified address space to
 * storage, clamping the write length to the file size on the last page, and
 * clears each page's dirty flag after a successful write.
 *
 * @param[in] aspace Pointer to the address space containing dirty pages
 *
 * @return Always returns 0 (success)
 *
 * @note NOTE(review): pages are not unlinked from list_dirty here, only
 *       is_dirty is cleared — presumably the dirty_node is removed later in
 *       dfs_page_remove(); confirm against the rest of the file.
 */
int dfs_aspace_flush(struct dfs_aspace *aspace)
{
    if (aspace)
    {
        rt_list_t *next;
        struct dfs_page *page;
        dfs_aspace_lock(aspace);
        /* nothing to flush without pages or with a detached vnode */
        if (aspace->pages_count > 0 && aspace->vnode)
        {
            rt_list_for_each(next, &aspace->list_dirty)
            {
                page = rt_list_entry(next, struct dfs_page, dirty_node);
                if (page->is_dirty == 1 && aspace->vnode)
                {
                    /* the last page of a file may only be partially valid */
                    if (aspace->vnode->size < page->fpos + page->size)
                    {
                        page->len = aspace->vnode->size - page->fpos;
                    }
                    else
                    {
                        page->len = page->size;
                    }
                    if (aspace->ops->write)
                    {
                        aspace->ops->write(page);
                    }
                    page->is_dirty = 0;
                }
                RT_ASSERT(page->is_dirty == 0);
            }
        }
        dfs_aspace_unlock(aspace);
    }
    return 0;
}
  1544. /**
  1545. * @brief Clean all pages from an address space
  1546. *
  1547. * This function removes all active pages from the specified address space while
  1548. * maintaining thread safety through proper locking. It skips inactive pages
  1549. * during the cleanup process.
  1550. *
  1551. * @param[in] aspace Pointer to the address space structure to clean
  1552. *
  1553. * @return 0 on success, negative value on error
  1554. */
  1555. int dfs_aspace_clean(struct dfs_aspace *aspace)
  1556. {
  1557. if (aspace)
  1558. {
  1559. dfs_aspace_lock(aspace);
  1560. if (aspace->pages_count > 0)
  1561. {
  1562. rt_list_t *next = aspace->list_active.next;
  1563. struct dfs_page *page;
  1564. while (next && next != &aspace->list_active)
  1565. {
  1566. if (next == &aspace->list_inactive)
  1567. {
  1568. next = next->next;
  1569. continue;
  1570. }
  1571. page = rt_list_entry(next, struct dfs_page, space_node);
  1572. next = next->next;
  1573. dfs_page_remove(page);
  1574. }
  1575. }
  1576. dfs_aspace_unlock(aspace);
  1577. }
  1578. return 0;
  1579. }
/**
 * @brief Map a file page into virtual address space
 *
 * This function maps the cached page covering the given virtual address into
 * the target varea, records the mapping on the page's mmap list, and performs
 * the cache maintenance needed for coherent instruction fetch.
 *
 * @param[in] file Pointer to the file structure
 * @param[in] varea Pointer to the virtual address area structure
 * @param[in] vaddr Virtual address to map the page to
 *
 * @return Kernel virtual address of the mapped page frame on success,
 *         NULL on failure
 *
 * @note This function handles cache synchronization for architectures with
 *       weak memory models or Harvard architectures to ensure data visibility.
 *       It also manages the mapping structure lifecycle through proper
 *       allocation/free.
 */
void *dfs_aspace_mmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
{
    void *ret = RT_NULL;
    struct dfs_page *page;
    struct dfs_aspace *aspace = file->vnode->aspace;
    rt_aspace_t target_aspace = varea->aspace;
    /* translate the fault address back to its file position */
    page = dfs_page_lookup(file, dfs_aspace_fpos(varea, vaddr));
    if (page)
    {
        struct dfs_mmap *map = (struct dfs_mmap *)rt_calloc(1, sizeof(struct dfs_mmap));
        if (map)
        {
            void *pg_vaddr = page->page;
            /* the MMU mapping needs the frame's physical address */
            void *pg_paddr = rt_kmem_v2p(pg_vaddr);
            int err = rt_varea_map_range(varea, vaddr, pg_paddr, page->size);
            if (err == RT_EOK)
            {
                /**
                 * Note: While the page is mapped into user area, the data writing into the page
                 * is not guaranteed to be visible for machines with the *weak* memory model and
                 * those Harvard architecture (especially for those ARM64) cores for their
                 * out-of-order pipelines of data buffer. Besides if the instruction cache in the
                 * L1 memory system is a VIPT cache, there are chances to have the alias matching
                 * entry if we reuse the same page frame and map it into the same virtual address
                 * of the previous one.
                 *
                 * That's why we have to do synchronization and cleanup manually to ensure that
                 * fetching of the next instruction can see the coherent data with the data cache,
                 * TLB, MMU, main memory, and all the other observers in the computer system.
                 */
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, vaddr, ARCH_PAGE_SIZE);
                rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, vaddr, ARCH_PAGE_SIZE);
                ret = pg_vaddr;
                /* record the mapping so dfs_page_unmap() can undo it later */
                map->aspace = target_aspace;
                map->vaddr = vaddr;
                dfs_aspace_lock(aspace);
                rt_list_insert_after(&page->mmap_head, &map->mmap_node);
                /* the mapping itself now keeps the page cached; drop the
                 * lookup reference */
                dfs_page_release(page);
                dfs_aspace_unlock(aspace);
            }
            else
            {
                /* mapping failed: undo both the lookup ref and the record */
                dfs_page_release(page);
                rt_free(map);
            }
        }
        else
        {
            dfs_page_release(page);
        }
    }
    return ret;
}
  1650. /**
  1651. * @brief Unmap pages from virtual address space
  1652. *
  1653. * This function removes mappings of file pages within the specified virtual address range.
  1654. * It handles cache synchronization and maintains page dirty status when unmapping.
  1655. *
  1656. * @param[in] file Pointer to the file structure
  1657. * @param[in] varea Pointer to the virtual address area to unmap
  1658. *
  1659. * @return 0 on success
  1660. *
  1661. * @note This function handles both private and shared mappings, ensuring proper
  1662. * cache synchronization and page dirty status maintenance during unmapping.
  1663. */
  1664. int dfs_aspace_unmap(struct dfs_file *file, struct rt_varea *varea)
  1665. {
  1666. struct dfs_vnode *vnode = file->vnode;
  1667. struct dfs_aspace *aspace = vnode->aspace;
  1668. void *unmap_start = varea->start;
  1669. void *unmap_end = (char *)unmap_start + varea->size;
  1670. if (aspace)
  1671. {
  1672. rt_list_t *next;
  1673. struct dfs_page *page;
  1674. dfs_aspace_lock(aspace);
  1675. if (aspace->pages_count > 0)
  1676. {
  1677. rt_list_for_each(next, &aspace->list_active)
  1678. {
  1679. if (next != &aspace->list_inactive)
  1680. {
  1681. page = rt_list_entry(next, struct dfs_page, space_node);
  1682. if (page)
  1683. {
  1684. rt_list_t *node, *tmp;
  1685. struct dfs_mmap *map;
  1686. rt_varea_t map_varea = RT_NULL;
  1687. node = page->mmap_head.next;
  1688. while (node != &page->mmap_head)
  1689. {
  1690. rt_aspace_t map_aspace;
  1691. map = rt_list_entry(node, struct dfs_mmap, mmap_node);
  1692. tmp = node;
  1693. node = node->next;
  1694. if (map && varea->aspace == map->aspace
  1695. && map->vaddr >= unmap_start && map->vaddr < unmap_end)
  1696. {
  1697. void *vaddr = map->vaddr;
  1698. map_aspace = map->aspace;
  1699. if (!map_varea || map_varea->aspace != map_aspace ||
  1700. vaddr < map_varea->start ||
  1701. vaddr >= map_varea->start + map_varea->size)
  1702. {
  1703. /* lock the tree so we don't access uncompleted data */
  1704. map_varea = rt_aspace_query(map_aspace, vaddr);
  1705. }
  1706. rt_varea_unmap_page(map_varea, vaddr);
  1707. if (!rt_varea_is_private_locked(varea) &&
  1708. page->fpos < page->aspace->vnode->size)
  1709. {
  1710. dfs_page_dirty(page);
  1711. }
  1712. rt_list_remove(tmp);
  1713. rt_free(map);
  1714. break;
  1715. }
  1716. }
  1717. }
  1718. }
  1719. }
  1720. }
  1721. dfs_aspace_unlock(aspace);
  1722. }
  1723. return 0;
  1724. }
  1725. /**
  1726. * Unmap a page from virtual address space.
  1727. *
  1728. * @param[in] file The file object containing the page
  1729. * @param[in] varea The virtual memory area
  1730. * @param[in] vaddr The virtual address to unmap
  1731. *
  1732. * @return Always returns 0 on success
  1733. *
  1734. * @note This function removes the mapping between a virtual address and a physical page.
  1735. * It handles cleanup of mmap structures and marks pages dirty if needed.
  1736. */
  1737. int dfs_aspace_page_unmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
  1738. {
  1739. struct dfs_page *page;
  1740. struct dfs_aspace *aspace = file->vnode->aspace;
  1741. if (aspace)
  1742. {
  1743. dfs_aspace_lock(aspace);
  1744. page = dfs_page_search(aspace, dfs_aspace_fpos(varea, vaddr));
  1745. if (page)
  1746. {
  1747. rt_list_t *node, *tmp;
  1748. struct dfs_mmap *map;
  1749. rt_varea_unmap_page(varea, vaddr);
  1750. node = page->mmap_head.next;
  1751. while (node != &page->mmap_head)
  1752. {
  1753. map = rt_list_entry(node, struct dfs_mmap, mmap_node);
  1754. tmp = node;
  1755. node = node->next;
  1756. if (map && varea->aspace == map->aspace && vaddr == map->vaddr)
  1757. {
  1758. if (!rt_varea_is_private_locked(varea))
  1759. {
  1760. dfs_page_dirty(page);
  1761. }
  1762. rt_list_remove(tmp);
  1763. rt_free(map);
  1764. break;
  1765. }
  1766. }
  1767. dfs_page_release(page);
  1768. }
  1769. dfs_aspace_unlock(aspace);
  1770. }
  1771. return 0;
  1772. }
  1773. /**
  1774. * Mark a page as dirty in the address space.
  1775. *
  1776. * @param[in] file The file object containing the page
  1777. * @param[in] varea The virtual memory area
  1778. * @param[in] vaddr The virtual address of the page
  1779. *
  1780. * @return Always returns 0 on success
  1781. *
  1782. * @note This function marks a specific page as dirty in the file's address space.
  1783. * The page is released after being marked dirty.
  1784. */
  1785. int dfs_aspace_page_dirty(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
  1786. {
  1787. struct dfs_page *page;
  1788. struct dfs_aspace *aspace = file->vnode->aspace;
  1789. if (aspace)
  1790. {
  1791. dfs_aspace_lock(aspace);
  1792. page = dfs_page_search(aspace, dfs_aspace_fpos(varea, vaddr));
  1793. if (page)
  1794. {
  1795. dfs_page_dirty(page);
  1796. dfs_page_release(page);
  1797. }
  1798. dfs_aspace_unlock(aspace);
  1799. }
  1800. return 0;
  1801. }
  1802. /**
  1803. * Calculate file position from virtual address.
  1804. *
  1805. * @param[in] varea The virtual memory area
  1806. * @param[in] vaddr The virtual address to convert
  1807. *
  1808. * @return The calculated file position offset
  1809. */
  1810. off_t dfs_aspace_fpos(struct rt_varea *varea, void *vaddr)
  1811. {
  1812. return (off_t)(intptr_t)vaddr - (off_t)(intptr_t)varea->start + varea->offset * ARCH_PAGE_SIZE;
  1813. }
  1814. /**
  1815. * Get the virtual address corresponding to a file position in a virtual area.
  1816. *
  1817. * @param[in] varea The virtual area structure
  1818. * @param[in] fpos The file position to convert
  1819. *
  1820. * @return The virtual address corresponding to the file position
  1821. */
  1822. void *dfs_aspace_vaddr(struct rt_varea *varea, off_t fpos)
  1823. {
  1824. return varea->start + fpos - varea->offset * ARCH_PAGE_SIZE;
  1825. }
  1826. /**
  1827. * @brief Read data from memory-mapped file space
  1828. *
  1829. * This function handles read operations for memory-mapped file regions by
  1830. * translating virtual addresses to file positions and performing the actual
  1831. * read operation through dfs_aspace_read.
  1832. *
  1833. * @param[in] file Pointer to the file structure being mapped
  1834. * @param[in] varea Pointer to the virtual memory area structure
  1835. * @param[in] data Pointer to the I/O message containing read details
  1836. * (includes fault address and buffer address)
  1837. *
  1838. * @return Number of bytes successfully read (ARCH_PAGE_SIZE on success)
  1839. * 0 if any parameter is invalid
  1840. */
  1841. int dfs_aspace_mmap_read(struct dfs_file *file, struct rt_varea *varea, void *data)
  1842. {
  1843. int ret = 0;
  1844. if (file && varea)
  1845. {
  1846. struct rt_aspace_io_msg *msg = (struct rt_aspace_io_msg *)data;
  1847. if (msg)
  1848. {
  1849. off_t fpos = dfs_aspace_fpos(varea, msg->fault_vaddr);
  1850. return dfs_aspace_read(file, msg->buffer_vaddr, ARCH_PAGE_SIZE, &fpos);
  1851. }
  1852. }
  1853. return ret;
  1854. }
  1855. /**
  1856. * @brief Write data to memory-mapped file space
  1857. *
  1858. * This function handles write operations for memory-mapped file regions by
  1859. * translating virtual addresses to file positions and performing the actual
  1860. * write operation through dfs_aspace_write.
  1861. *
  1862. * @param[in] file Pointer to the file structure being mapped
  1863. * @param[in] varea Pointer to the virtual memory area structure
  1864. * @param[in] data Pointer to the I/O message containing write details
  1865. * (includes fault address and buffer address)
  1866. *
  1867. * @return Number of bytes successfully written (ARCH_PAGE_SIZE on success)
  1868. * 0 if any parameter is invalid
  1869. */
  1870. int dfs_aspace_mmap_write(struct dfs_file *file, struct rt_varea *varea, void *data)
  1871. {
  1872. int ret = 0;
  1873. if (file && varea)
  1874. {
  1875. struct rt_aspace_io_msg *msg = (struct rt_aspace_io_msg *)data;
  1876. if (msg)
  1877. {
  1878. off_t fpos = dfs_aspace_fpos(varea, msg->fault_vaddr);
  1879. return dfs_aspace_write(file, msg->buffer_vaddr, ARCH_PAGE_SIZE, &fpos);
  1880. }
  1881. }
  1882. return ret;
  1883. }
  1884. #endif