dfs_pcache.c
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-05-05     RTT          Implement mnt in dfs v2.0
 * 2023-10-23     Shell        fix synchronization of data to icache
 */
#define DBG_TAG "dfs.pcache"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>

#include "dfs_pcache.h"
#include "dfs_dentry.h"
#include "dfs_mnt.h"
#include "mm_page.h"
#include <mmu.h>
#include <tlb.h>
#include <rthw.h>
#ifdef RT_USING_PAGECACHE

#ifndef RT_PAGECACHE_COUNT
#define RT_PAGECACHE_COUNT          4096
#endif

#ifndef RT_PAGECACHE_ASPACE_COUNT
#define RT_PAGECACHE_ASPACE_COUNT   1024
#endif

#ifndef RT_PAGECACHE_PRELOAD
#define RT_PAGECACHE_PRELOAD        4
#endif

#ifndef RT_PAGECACHE_GC_WORK_LEVEL
#define RT_PAGECACHE_GC_WORK_LEVEL  90
#endif

#ifndef RT_PAGECACHE_GC_STOP_LEVEL
#define RT_PAGECACHE_GC_STOP_LEVEL  70
#endif

#define PCACHE_MQ_GC    1
#define PCACHE_MQ_WB    2
struct dfs_aspace_mmap_obj
{
    rt_uint32_t cmd;
    struct rt_mailbox *ack;
    struct dfs_file *file;
    struct rt_varea *varea;
    void *data;
};

struct dfs_pcache_mq_obj
{
    struct rt_mailbox *ack;
    rt_uint32_t cmd;
};
static struct dfs_page *dfs_page_lookup(struct dfs_file *file, off_t pos);
static void dfs_page_ref(struct dfs_page *page);
static int dfs_page_inactive(struct dfs_page *page);
static int dfs_page_remove(struct dfs_page *page);
static void dfs_page_release(struct dfs_page *page);
static int dfs_page_dirty(struct dfs_page *page);

static int dfs_aspace_release(struct dfs_aspace *aspace);

static int dfs_aspace_lock(struct dfs_aspace *aspace);
static int dfs_aspace_unlock(struct dfs_aspace *aspace);

static int dfs_pcache_lock(void);
static int dfs_pcache_unlock(void);

static struct dfs_pcache __pcache;
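
/* Reclaim up to 'count' cached pages from one address space (aspace).
 * Inactive pages are scanned first, then active ones; only pages whose
 * reference count allows it are actually removed (see dfs_page_remove()).
 * Returns the number of pages that were reclaimed. */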
static int dfs_aspace_gc(struct dfs_aspace *aspace, int count)
{
    int cnt = count;

    if (aspace)
    {
        dfs_aspace_lock(aspace);

        if (aspace->pages_count > 0)
        {
            struct dfs_page *page = RT_NULL;
            rt_list_t *node = aspace->list_inactive.next;

            while (cnt && node != &aspace->list_active)
            {
                page = rt_list_entry(node, struct dfs_page, space_node);
                node = node->next;
                if (dfs_page_remove(page) == 0)
                {
                    cnt --;
                }
            }

            node = aspace->list_active.next;
            while (cnt && node != &aspace->list_inactive)
            {
                page = rt_list_entry(node, struct dfs_page, space_node);
                node = node->next;
                if (dfs_page_remove(page) == 0)
                {
                    cnt --;
                }
            }
        }

        dfs_aspace_unlock(aspace);
    }

    return count - cnt;
}
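
/* Global page-cache shrinker. When 'count' is 0 the target is derived from
 * RT_PAGECACHE_GC_STOP_LEVEL, i.e. enough pages are released to bring the
 * cache back down to the stop watermark. Address spaces on the inactive
 * list are reclaimed (and possibly freed) before the active ones. */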
void dfs_pcache_release(size_t count)
{
    rt_list_t *node = RT_NULL;
    struct dfs_aspace *aspace = RT_NULL;

    dfs_pcache_lock();

    if (count == 0)
    {
        count = rt_atomic_load(&(__pcache.pages_count)) - RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_STOP_LEVEL / 100;
    }

    node = __pcache.list_inactive.next;
    while (count && node != &__pcache.list_active)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace)
        {
            count -= dfs_aspace_gc(aspace, count);
            dfs_aspace_release(aspace);
        }
    }

    node = __pcache.list_active.next;
    while (count && node != &__pcache.list_inactive)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace)
        {
            count -= dfs_aspace_gc(aspace, count);
        }
    }

    dfs_pcache_unlock();
}
void dfs_pcache_unmount(struct dfs_mnt *mnt)
{
    rt_list_t *node = RT_NULL;
    struct dfs_aspace *aspace = RT_NULL;

    dfs_pcache_lock();

    node = __pcache.list_inactive.next;
    while (node != &__pcache.list_active)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace && aspace->mnt == mnt)
        {
            dfs_aspace_clean(aspace);
            dfs_aspace_release(aspace);
        }
    }

    node = __pcache.list_active.next;
    while (node != &__pcache.list_inactive)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace && aspace->mnt == mnt)
        {
            dfs_aspace_clean(aspace);
            dfs_aspace_release(aspace);
        }
    }

    dfs_pcache_unlock();
}
static int dfs_pcache_limit_check(void)
{
    int index = 4;

    while (index && rt_atomic_load(&(__pcache.pages_count)) > RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_WORK_LEVEL / 100)
    {
        dfs_pcache_release(0);
        index --;
    }

    return 0;
}
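
/* Background worker for the page cache. It blocks on the message queue and
 * handles two commands: PCACHE_MQ_GC triggers the watermark check above,
 * and PCACHE_MQ_WB writes back dirty pages that have been dirty for at
 * least 500 ms, a few pages at most per wake-up. */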
static void dfs_pcache_thread(void *parameter)
{
    struct dfs_pcache_mq_obj work;

    while (1)
    {
        if (rt_mq_recv(__pcache.mqueue, &work, sizeof(work), RT_WAITING_FOREVER) == sizeof(work))
        {
            if (work.cmd == PCACHE_MQ_GC)
            {
                dfs_pcache_limit_check();
            }
            else if (work.cmd == PCACHE_MQ_WB)
            {
                int count = 0;
                rt_list_t *node;
                struct dfs_page *page = 0;

                while (1)
                {
                    /* try to get dirty page */
                    dfs_pcache_lock();
                    page = 0;
                    rt_list_for_each(node, &__pcache.list_active)
                    {
                        if (node != &__pcache.list_inactive)
                        {
                            struct dfs_aspace *aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
                            dfs_aspace_lock(aspace);
                            if (aspace->list_dirty.next != &aspace->list_dirty)
                            {
                                page = rt_list_entry(aspace->list_dirty.next, struct dfs_page, dirty_node);
                                dfs_page_ref(page);
                                dfs_aspace_unlock(aspace);
                                break;
                            }
                            else
                            {
                                page = RT_NULL;
                            }
                            dfs_aspace_unlock(aspace);
                        }
                    }
                    dfs_pcache_unlock();

                    if (page)
                    {
                        struct dfs_aspace *aspace = page->aspace;

                        dfs_aspace_lock(aspace);
                        if (page->is_dirty == 1 && aspace->vnode)
                        {
                            if (rt_tick_get_millisecond() - page->tick_ms >= 500)
                            {
                                if (aspace->vnode->size < page->fpos + page->size)
                                {
                                    page->len = aspace->vnode->size - page->fpos;
                                }
                                else
                                {
                                    page->len = page->size;
                                }

                                //rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, page->page, page->size);
                                if (aspace->ops->write)
                                {
                                    aspace->ops->write(page);
                                }

                                page->is_dirty = 0;

                                if (page->dirty_node.next != RT_NULL)
                                {
                                    rt_list_remove(&page->dirty_node);
                                    page->dirty_node.next = RT_NULL;
                                }
                            }
                        }
                        dfs_page_release(page);
                        dfs_aspace_unlock(aspace);
                    }
                    else
                    {
                        break;
                    }

                    rt_thread_mdelay(5);

                    count ++;
                    if (count >= 4)
                    {
                        break;
                    }
                }
            }
        }
    }
}
static int dfs_pcache_init(void)
{
    rt_thread_t tid;

    for (int i = 0; i < RT_PAGECACHE_HASH_NR; i++)
    {
        rt_list_init(&__pcache.head[i]);
    }

    rt_list_init(&__pcache.list_active);
    rt_list_init(&__pcache.list_inactive);
    rt_list_insert_after(&__pcache.list_active, &__pcache.list_inactive);

    rt_atomic_store(&(__pcache.pages_count), 0);

    rt_mutex_init(&__pcache.lock, "pcache", RT_IPC_FLAG_PRIO);

    __pcache.mqueue = rt_mq_create("pcache", sizeof(struct dfs_pcache_mq_obj), 1024, RT_IPC_FLAG_FIFO);
    tid = rt_thread_create("pcache", dfs_pcache_thread, 0, 8192, 25, 5);
    if (tid)
    {
        rt_thread_startup(tid);
    }

    __pcache.last_time_wb = rt_tick_get_millisecond();

    return 0;
}
INIT_PREV_EXPORT(dfs_pcache_init);
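
/* Post a command (PCACHE_MQ_GC or PCACHE_MQ_WB) to the worker thread.
 * The send uses a zero timeout, so the request is simply dropped (an error
 * is returned) if the message queue is currently full. */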
static rt_ubase_t dfs_pcache_mq_work(rt_uint32_t cmd)
{
    rt_err_t err;
    struct dfs_pcache_mq_obj work = { 0 };

    work.cmd = cmd;

    err = rt_mq_send_wait(__pcache.mqueue, (const void *)&work, sizeof(struct dfs_pcache_mq_obj), 0);

    return err;
}

static int dfs_pcache_lock(void)
{
    rt_mutex_take(&__pcache.lock, RT_WAITING_FOREVER);
    return 0;
}

static int dfs_pcache_unlock(void)
{
    rt_mutex_release(&__pcache.lock);
    return 0;
}
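
/* Hash a (mount, path) pair into one of the RT_PAGECACHE_HASH_NR buckets.
 * The string part is the classic djb2-style "hash * 33 + c" loop, XOR-ed
 * with the mount pointer so identical paths on different mounts land in
 * different buckets. */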
static uint32_t dfs_aspace_hash(struct dfs_mnt *mnt, const char *path)
{
    uint32_t val = 0;

    if (path)
    {
        while (*path)
        {
            val = ((val << 5) + val) + *path++;
        }
    }

    return (val ^ (unsigned long)mnt) & (RT_PAGECACHE_HASH_NR - 1);
}
static struct dfs_aspace *dfs_aspace_hash_lookup(struct dfs_dentry *dentry, const struct dfs_aspace_ops *ops)
{
    struct dfs_aspace *aspace = RT_NULL;

    dfs_pcache_lock();
    rt_list_for_each_entry(aspace, &__pcache.head[dfs_aspace_hash(dentry->mnt, dentry->pathname)], hash_node)
    {
        if (aspace->mnt == dentry->mnt
            && aspace->ops == ops
            && !strcmp(aspace->pathname, dentry->pathname))
        {
            rt_atomic_add(&aspace->ref_count, 1);
            dfs_pcache_unlock();
            return aspace;
        }
    }
    dfs_pcache_unlock();

    return RT_NULL;
}
static void dfs_aspace_insert(struct dfs_aspace *aspace)
{
    uint32_t val = 0;

    val = dfs_aspace_hash(aspace->mnt, aspace->pathname);

    dfs_pcache_lock();
    rt_atomic_add(&aspace->ref_count, 1);
    rt_list_insert_after(&__pcache.head[val], &aspace->hash_node);
    rt_list_insert_before(&__pcache.list_inactive, &aspace->cache_node);
    dfs_pcache_unlock();
}

static void dfs_aspace_remove(struct dfs_aspace *aspace)
{
    dfs_pcache_lock();
    if (aspace->hash_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->hash_node);
    }
    if (aspace->cache_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->cache_node);
    }
    dfs_pcache_unlock();
}

static void dfs_aspace_active(struct dfs_aspace *aspace)
{
    dfs_pcache_lock();
    if (aspace->cache_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->cache_node);
        rt_list_insert_before(&__pcache.list_inactive, &aspace->cache_node);
    }
    dfs_pcache_unlock();
}

static void dfs_aspace_inactive(struct dfs_aspace *aspace)
{
    dfs_pcache_lock();
    if (aspace->cache_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->cache_node);
        rt_list_insert_before(&__pcache.list_active, &aspace->cache_node);
    }
    dfs_pcache_unlock();
}
static struct dfs_aspace *_dfs_aspace_create(struct dfs_dentry *dentry,
                                             struct dfs_vnode *vnode,
                                             const struct dfs_aspace_ops *ops)
{
    struct dfs_aspace *aspace;

    aspace = rt_calloc(1, sizeof(struct dfs_aspace));
    if (aspace)
    {
        rt_list_init(&aspace->list_active);
        rt_list_init(&aspace->list_inactive);
        rt_list_init(&aspace->list_dirty);
        rt_list_insert_after(&aspace->list_active, &aspace->list_inactive);

        aspace->avl_root.root_node = 0;
        aspace->avl_page = 0;

        rt_mutex_init(&aspace->lock, rt_thread_self()->parent.name, RT_IPC_FLAG_PRIO);
        rt_atomic_store(&aspace->ref_count, 1);

        aspace->pages_count = 0;
        aspace->vnode = vnode;
        aspace->ops = ops;

        if (dentry && dentry->mnt)
        {
            aspace->mnt = dentry->mnt;
            aspace->fullpath = rt_strdup(dentry->mnt->fullpath);
            aspace->pathname = rt_strdup(dentry->pathname);
        }

        dfs_aspace_insert(aspace);
    }

    return aspace;
}

struct dfs_aspace *dfs_aspace_create(struct dfs_dentry *dentry,
                                     struct dfs_vnode *vnode,
                                     const struct dfs_aspace_ops *ops)
{
    struct dfs_aspace *aspace = RT_NULL;

    RT_ASSERT(vnode && ops);
    dfs_pcache_lock();
    if (dentry)
    {
        aspace = dfs_aspace_hash_lookup(dentry, ops);
    }

    if (!aspace)
    {
        aspace = _dfs_aspace_create(dentry, vnode, ops);
    }
    else
    {
        aspace->vnode = vnode;
        dfs_aspace_active(aspace);
    }
    dfs_pcache_unlock();

    return aspace;
}
int dfs_aspace_destroy(struct dfs_aspace *aspace)
{
    int ret = -EINVAL;

    if (aspace)
    {
        dfs_pcache_lock();
        dfs_aspace_lock(aspace);
        rt_atomic_sub(&aspace->ref_count, 1);
        RT_ASSERT(rt_atomic_load(&aspace->ref_count) > 0);
        dfs_aspace_inactive(aspace);
        aspace->vnode = RT_NULL;
        if (dfs_aspace_release(aspace) != 0)
        {
            dfs_aspace_unlock(aspace);
        }
        dfs_pcache_unlock();
    }

    return ret;
}

static int dfs_aspace_release(struct dfs_aspace *aspace)
{
    int ret = -1;

    if (aspace)
    {
        dfs_pcache_lock();
        dfs_aspace_lock(aspace);

        if (rt_atomic_load(&aspace->ref_count) == 1 && aspace->pages_count == 0)
        {
            dfs_aspace_remove(aspace);
            if (aspace->fullpath)
            {
                rt_free(aspace->fullpath);
            }
            if (aspace->pathname)
            {
                rt_free(aspace->pathname);
            }
            rt_mutex_detach(&aspace->lock);
            rt_free(aspace);
            ret = 0;
        }
        else
        {
            dfs_aspace_unlock(aspace);
        }
        dfs_pcache_unlock();
    }

    return ret;
}
static int _dfs_aspace_dump(struct dfs_aspace *aspace, int is_dirty)
{
    if (aspace)
    {
        rt_list_t *next;
        struct dfs_page *page;

        dfs_aspace_lock(aspace);
        if (aspace->pages_count > 0)
        {
            rt_list_for_each(next, &aspace->list_inactive)
            {
                if (next != &aspace->list_active)
                {
                    page = rt_list_entry(next, struct dfs_page, space_node);
                    if (is_dirty && page->is_dirty)
                    {
                        rt_kprintf("    pages >> fpos: %d index :%d is_dirty: %d\n", page->fpos, page->fpos / ARCH_PAGE_SIZE, page->is_dirty);
                    }
                    else if (is_dirty == 0)
                    {
                        rt_kprintf("    pages >> fpos: %d index :%d is_dirty: %d\n", page->fpos, page->fpos / ARCH_PAGE_SIZE, page->is_dirty);
                    }
                }
            }
        }
        else
        {
            rt_kprintf("    pages >> empty\n");
        }
        dfs_aspace_unlock(aspace);
    }
    return 0;
}

static int dfs_pcache_dump(int argc, char **argv)
{
    int dump = 0;
    rt_list_t *node;
    struct dfs_aspace *aspace;

    if (argc == 2)
    {
        if (strcmp(argv[1], "--dump") == 0)
        {
            dump = 1;
        }
        else if (strcmp(argv[1], "--dirty") == 0)
        {
            dump = 2;
        }
        else
        {
            rt_kprintf("dfs page cache dump\n");
            rt_kprintf("usage: dfs_cache\n");
            rt_kprintf("       dfs_cache --dump\n");
            rt_kprintf("       dfs_cache --dirty\n");
            return 0;
        }
    }

    dfs_pcache_lock();

    rt_kprintf("total pages count: %d / %d\n", rt_atomic_load(&(__pcache.pages_count)), RT_PAGECACHE_COUNT);

    rt_list_for_each(node, &__pcache.list_active)
    {
        if (node != &__pcache.list_inactive)
        {
            aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
            if (aspace->mnt)
            {
                rt_kprintf("file: %s%s pages: %d\n", aspace->fullpath, aspace->pathname, aspace->pages_count);
            }
            else
            {
                rt_kprintf("unknown type, pages: %d\n", aspace->pages_count);
            }

            if (dump > 0)
            {
                _dfs_aspace_dump(aspace, dump == 2 ? 1 : 0);
            }
        }
    }

    dfs_pcache_unlock();

    return 0;
}
MSH_CMD_EXPORT_ALIAS(dfs_pcache_dump, dfs_cache, dump dfs page cache);
static int dfs_page_unmap(struct dfs_page *page)
{
    rt_list_t *next;
    struct dfs_mmap *map;

    next = page->mmap_head.next;

    if (next != &page->mmap_head && page->fpos < page->aspace->vnode->size)
    {
        dfs_page_dirty(page);
    }

    while (next != &page->mmap_head)
    {
        map = rt_list_entry(next, struct dfs_mmap, mmap_node);
        next = next->next;

        if (map)
        {
            void *vaddr = dfs_aspace_vaddr(map->varea, page->fpos);
            //rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, vaddr, ARCH_PAGE_SIZE);
            rt_varea_unmap_page(map->varea, vaddr);

            rt_free(map);
        }
    }

    rt_list_init(&page->mmap_head);

    return 0;
}

static struct dfs_page *dfs_page_create(void)
{
    struct dfs_page *page = RT_NULL;

    page = rt_calloc(1, sizeof(struct dfs_page));
    if (page)
    {
        page->page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
        if (page->page)
        {
            //memset(page->page, 0x00, ARCH_PAGE_SIZE);
            rt_list_init(&page->mmap_head);
            rt_atomic_store(&(page->ref_count), 1);
        }
        else
        {
            LOG_E("page alloc failed!\n");
            rt_free(page);
            page = RT_NULL;
        }
    }

    return page;
}
static void dfs_page_ref(struct dfs_page *page)
{
    rt_atomic_add(&(page->ref_count), 1);
}

static void dfs_page_release(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    rt_atomic_sub(&(page->ref_count), 1);

    if (rt_atomic_load(&(page->ref_count)) == 0)
    {
        dfs_page_unmap(page);

        if (page->is_dirty == 1 && aspace->vnode)
        {
            if (aspace->vnode->size < page->fpos + page->size)
            {
                page->len = aspace->vnode->size - page->fpos;
            }
            else
            {
                page->len = page->size;
            }
            //rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, page->page, page->size);
            if (aspace->ops->write)
            {
                aspace->ops->write(page);
            }
            page->is_dirty = 0;
        }
        RT_ASSERT(page->is_dirty == 0);

        rt_pages_free(page->page, 0);
        page->page = RT_NULL;
        rt_free(page);
    }

    dfs_aspace_unlock(aspace);
}

static int dfs_page_compare(off_t fpos, off_t value)
{
    return fpos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE - value;
}
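
/* Insert a page into the aspace's AVL tree, keyed by page-aligned file
 * position (fpos). Returns -1 if a page with the same fpos is already
 * present; on success the tree is rebalanced and the page is remembered
 * as the most-recently-used hint (avl_page). */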
static int _dfs_page_insert(struct dfs_aspace *aspace, struct dfs_page *page)
{
    struct dfs_page *tmp;
    struct util_avl_struct *current = NULL;
    struct util_avl_struct **next = &(aspace->avl_root.root_node);

    /* Figure out where to put new node */
    while (*next)
    {
        current = *next;
        tmp = rt_container_of(current, struct dfs_page, avl_node);

        if (page->fpos < tmp->fpos)
            next = &(current->avl_left);
        else if (page->fpos > tmp->fpos)
            next = &(current->avl_right);
        else
            return -1;
    }

    /* Add new node and rebalance tree. */
    util_avl_link(&page->avl_node, current, next);
    util_avl_rebalance(current, &aspace->avl_root);
    aspace->avl_page = page;

    return 0;
}

static void _dfs_page_remove(struct dfs_aspace *aspace, struct dfs_page *page)
{
    if (aspace->avl_page && aspace->avl_page == page)
    {
        aspace->avl_page = 0;
    }

    util_avl_remove(&page->avl_node, &aspace->avl_root);
}

static int dfs_aspace_lock(struct dfs_aspace *aspace)
{
    rt_mutex_take(&aspace->lock, RT_WAITING_FOREVER);
    return 0;
}

static int dfs_aspace_unlock(struct dfs_aspace *aspace)
{
    rt_mutex_release(&aspace->lock);
    return 0;
}
static int dfs_page_insert(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    rt_list_insert_before(&aspace->list_inactive, &page->space_node);
    aspace->pages_count ++;

    if (_dfs_page_insert(aspace, page))
    {
        RT_ASSERT(0);
    }

    if (aspace->pages_count > RT_PAGECACHE_ASPACE_COUNT)
    {
        rt_list_t *next = aspace->list_active.next;

        if (next != &aspace->list_inactive)
        {
            struct dfs_page *tmp = rt_list_entry(next, struct dfs_page, space_node);
            dfs_page_inactive(tmp);
        }
    }

    rt_atomic_add(&(__pcache.pages_count), 1);

    dfs_aspace_unlock(aspace);

    return 0;
}

static int dfs_page_remove(struct dfs_page *page)
{
    int ret = -1;
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    if (rt_atomic_load(&(page->ref_count)) == 1)
    {
        if (page->space_node.next != RT_NULL)
        {
            rt_list_remove(&page->space_node);
            page->space_node.next = RT_NULL;
            aspace->pages_count--;
            _dfs_page_remove(aspace, page);
        }
        if (page->dirty_node.next != RT_NULL)
        {
            rt_list_remove(&page->dirty_node);
            page->dirty_node.next = RT_NULL;
        }
        rt_atomic_sub(&(__pcache.pages_count), 1);
        dfs_page_release(page);
        ret = 0;
    }

    dfs_aspace_unlock(aspace);

    return ret;
}
static int dfs_page_active(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);
    if (page->space_node.next != RT_NULL)
    {
        rt_list_remove(&page->space_node);
        rt_list_insert_before(&aspace->list_inactive, &page->space_node);
    }
    dfs_aspace_unlock(aspace);

    return 0;
}

static int dfs_page_inactive(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);
    if (page->space_node.next != RT_NULL)
    {
        rt_list_remove(&page->space_node);
        rt_list_insert_before(&aspace->list_active, &page->space_node);
    }
    dfs_aspace_unlock(aspace);

    return 0;
}
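
/* Mark a page dirty: queue it on the aspace's dirty list (if it is still
 * linked into the cache), record the time it was dirtied, and kick the
 * writeback worker if the last writeback request was issued more than a
 * second ago. */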
static int dfs_page_dirty(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    if (page->dirty_node.next == RT_NULL && page->space_node.next != RT_NULL)
    {
        rt_list_insert_before(&aspace->list_dirty, &page->dirty_node);
    }

    page->is_dirty = 1;
    page->tick_ms = rt_tick_get_millisecond();

    if (rt_tick_get_millisecond() - __pcache.last_time_wb >= 1000)
    {
        dfs_pcache_mq_work(PCACHE_MQ_WB);
        __pcache.last_time_wb = rt_tick_get_millisecond();
    }

    dfs_aspace_unlock(aspace);

    return 0;
}
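
/* Look up a cached page by file position. The last hit (avl_page) is
 * checked first as a fast path, then the AVL tree is walked. A found page
 * is moved to the active list and returned with an extra reference that
 * the caller must drop via dfs_page_release(). */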
static struct dfs_page *dfs_page_search(struct dfs_aspace *aspace, off_t fpos)
{
    int cmp;
    struct dfs_page *page;
    struct util_avl_struct *avl_node;

    dfs_aspace_lock(aspace);

    if (aspace->avl_page && dfs_page_compare(fpos, aspace->avl_page->fpos) == 0)
    {
        page = aspace->avl_page;
        dfs_page_active(page);
        dfs_page_ref(page);
        dfs_aspace_unlock(aspace);
        return page;
    }

    avl_node = aspace->avl_root.root_node;
    while (avl_node)
    {
        page = rt_container_of(avl_node, struct dfs_page, avl_node);
        cmp = dfs_page_compare(fpos, page->fpos);

        if (cmp < 0)
        {
            avl_node = avl_node->avl_left;
        }
        else if (cmp > 0)
        {
            avl_node = avl_node->avl_right;
        }
        else
        {
            aspace->avl_page = page;
            dfs_page_active(page);
            dfs_page_ref(page);
            dfs_aspace_unlock(aspace);
            return page;
        }
    }

    dfs_aspace_unlock(aspace);

    return RT_NULL;
}
static struct dfs_page *dfs_aspace_load_page(struct dfs_file *file, off_t pos)
{
    struct dfs_page *page = RT_NULL;

    if (file && file->vnode && file->vnode->aspace)
    {
        struct dfs_vnode *vnode = file->vnode;
        struct dfs_aspace *aspace = vnode->aspace;

        page = dfs_page_create();
        if (page)
        {
            page->aspace = aspace;
            page->size = ARCH_PAGE_SIZE;
            page->fpos = pos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE;
            aspace->ops->read(file, page);
            //rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, page->page, page->size);
            page->ref_count ++;

            dfs_page_insert(page);
        }
    }

    return page;
}
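
/* Find the page covering 'pos', loading it from the backing filesystem on a
 * miss. On a miss, up to RT_PAGECACHE_PRELOAD consecutive pages are read
 * ahead; only the first loaded page is returned (referenced), the rest stay
 * cached. Crossing the GC watermarks triggers synchronous or queued GC. */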
static struct dfs_page *dfs_page_lookup(struct dfs_file *file, off_t pos)
{
    struct dfs_page *page = RT_NULL;
    struct dfs_aspace *aspace = file->vnode->aspace;

    dfs_aspace_lock(aspace);
    page = dfs_page_search(aspace, pos);
    if (!page)
    {
        int count = RT_PAGECACHE_PRELOAD;
        struct dfs_page *tmp = RT_NULL;
        off_t fpos = pos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE;

        do
        {
            page = dfs_aspace_load_page(file, fpos);
            if (page)
            {
                if (tmp == RT_NULL)
                {
                    tmp = page;
                }
                else
                {
                    dfs_page_release(page);
                }
            }
            else
            {
                break;
            }

            fpos += ARCH_PAGE_SIZE;
            page = dfs_page_search(aspace, fpos);
            if (page)
            {
                dfs_page_release(page);
            }
            count --;
        } while (count && page == RT_NULL);

        page = tmp;
        if (page)
        {
            dfs_aspace_unlock(aspace);

            if (rt_atomic_load(&(__pcache.pages_count)) >= RT_PAGECACHE_COUNT)
            {
                dfs_pcache_limit_check();
            }
            else if (rt_atomic_load(&(__pcache.pages_count)) >= RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_WORK_LEVEL / 100)
            {
                dfs_pcache_mq_work(PCACHE_MQ_GC);
            }

            return page;
        }
    }

    dfs_aspace_unlock(aspace);

    return page;
}
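
/* Read through the page cache: copy data page by page from cached pages
 * into 'buf', advancing *pos. Reads stop at end of file or when no page
 * can be obtained. Returns the number of bytes copied, or -EINVAL if the
 * file has no cacheable address space. */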
int dfs_aspace_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
{
    int ret = -EINVAL;

    if (file && file->vnode && file->vnode->aspace)
    {
        if (!(file->vnode->aspace->ops->read))
            return ret;

        struct dfs_vnode *vnode = file->vnode;
        struct dfs_aspace *aspace = vnode->aspace;

        struct dfs_page *page;
        char *ptr = (char *)buf;

        ret = 0;

        while (count)
        {
            page = dfs_page_lookup(file, *pos);
            if (page)
            {
                off_t len;

                dfs_aspace_lock(aspace);
                if (aspace->vnode->size < page->fpos + ARCH_PAGE_SIZE)
                {
                    len = aspace->vnode->size - *pos;
                }
                else
                {
                    len = page->fpos + ARCH_PAGE_SIZE - *pos;
                }

                len = count > len ? len : count;
                if (len)
                {
                    //rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, page->page, page->size);
                    rt_memcpy(ptr, page->page + *pos - page->fpos, len);
                    ptr += len;
                    *pos += len;
                    count -= len;
                    ret += len;
                }
                else
                {
                    dfs_page_release(page);
                    dfs_aspace_unlock(aspace);
                    break;
                }
                dfs_page_release(page);
                dfs_aspace_unlock(aspace);
            }
            else
            {
                break;
            }
        }
    }

    return ret;
}
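
/* Write through the page cache: copy data page by page into cached pages,
 * extending the vnode size when writing past the current end. With O_SYNC
 * the page is written back immediately; otherwise it is only marked dirty
 * and left to the writeback worker. */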
int dfs_aspace_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
{
    int ret = -EINVAL;

    if (file && file->vnode && file->vnode->aspace)
    {
        if (!(file->vnode->aspace->ops->write))
            return ret;

        struct dfs_vnode *vnode = file->vnode;
        struct dfs_aspace *aspace = vnode->aspace;

        struct dfs_page *page;
        char *ptr = (char *)buf;

        ret = 0;

        while (count)
        {
            page = dfs_page_lookup(file, *pos);
            if (page)
            {
                off_t len;

                dfs_aspace_lock(aspace);
                len = page->fpos + ARCH_PAGE_SIZE - *pos;
                len = count > len ? len : count;
                rt_memcpy(page->page + *pos - page->fpos, ptr, len);
                //rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, page->page, page->size);
                ptr += len;
                *pos += len;
                count -= len;
                ret += len;

                if (*pos > aspace->vnode->size)
                {
                    aspace->vnode->size = *pos;
                }

                if (file->flags & O_SYNC)
                {
                    if (aspace->vnode->size < page->fpos + page->size)
                    {
                        page->len = aspace->vnode->size - page->fpos;
                    }
                    else
                    {
                        page->len = page->size;
                    }

                    aspace->ops->write(page);
                    page->is_dirty = 0;
                }
                else
                {
                    dfs_page_dirty(page);
                }

                dfs_page_release(page);
                dfs_aspace_unlock(aspace);
            }
            else
            {
                break;
            }
        }
    }

    return ret;
}
int dfs_aspace_flush(struct dfs_aspace *aspace)
{
    if (aspace)
    {
        rt_list_t *next;
        struct dfs_page *page;

        dfs_aspace_lock(aspace);

        if (aspace->pages_count > 0 && aspace->vnode)
        {
            rt_list_for_each(next, &aspace->list_dirty)
            {
                page = rt_list_entry(next, struct dfs_page, dirty_node);
                if (page->is_dirty == 1 && aspace->vnode)
                {
                    if (aspace->vnode->size < page->fpos + page->size)
                    {
                        page->len = aspace->vnode->size - page->fpos;
                    }
                    else
                    {
                        page->len = page->size;
                    }

                    //rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, page->page, page->size);
                    if (aspace->ops->write)
                    {
                        aspace->ops->write(page);
                    }

                    page->is_dirty = 0;
                }
                RT_ASSERT(page->is_dirty == 0);
            }
        }

        dfs_aspace_unlock(aspace);
    }
    return 0;
}

int dfs_aspace_clean(struct dfs_aspace *aspace)
{
    if (aspace)
    {
        dfs_aspace_lock(aspace);

        if (aspace->pages_count > 0)
        {
            rt_list_t *next = aspace->list_active.next;
            struct dfs_page *page;

            while (next && next != &aspace->list_active)
            {
                if (next == &aspace->list_inactive)
                {
                    next = next->next;
                    continue;
                }
                page = rt_list_entry(next, struct dfs_page, space_node);
                next = next->next;
                dfs_page_remove(page);
            }
        }

        dfs_aspace_unlock(aspace);
    }

    return 0;
}
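
/* Map the page that backs 'vaddr' of a user varea. The cached page frame is
 * mapped into the varea and an entry recording the mapping is added to the
 * page's mmap list, so the mapping can be torn down (and the page dirtied)
 * later in dfs_aspace_unmap()/dfs_aspace_page_unmap(). */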
void *dfs_aspace_mmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
{
    void *ret = RT_NULL;
    struct dfs_page *page;
    struct dfs_aspace *aspace = file->vnode->aspace;

    page = dfs_page_lookup(file, dfs_aspace_fpos(varea, vaddr));
    if (page)
    {
        struct dfs_mmap *map = (struct dfs_mmap *)rt_calloc(1, sizeof(struct dfs_mmap));
        if (map)
        {
            void *pg_paddr = rt_kmem_v2p(page->page);
            int err = rt_varea_map_range(varea, vaddr, pg_paddr, page->size);
            if (err == RT_EOK)
            {
                /**
                 * Note: While the page is mapped into user area, the data writing into the page
                 * is not guaranteed to be visible for machines with the *weak* memory model and
                 * those Harvard architecture (especially for those ARM64) cores for their
                 * out-of-order pipelines of data buffer. Besides if the instruction cache in the
                 * L1 memory system is a VIPT cache, there are chances to have the alias matching
                 * entry if we reuse the same page frame and map it into the same virtual address
                 * of the previous one.
                 *
                 * That's why we have to do synchronization and cleanup manually to ensure that
                 * fetching of the next instruction can see the coherent data with the data cache,
                 * TLB, MMU, main memory, and all the other observers in the computer system.
                 */
                rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, vaddr, ARCH_PAGE_SIZE);
                ret = page->page;
                map->varea = varea;
                dfs_aspace_lock(aspace);
                rt_list_insert_after(&page->mmap_head, &map->mmap_node);
                dfs_page_release(page);
                dfs_aspace_unlock(aspace);
            }
            else
            {
                dfs_page_release(page);
                rt_free(map);
            }
        }
        else
        {
            dfs_page_release(page);
        }
    }

    return ret;
}
int dfs_aspace_unmap(struct dfs_file *file, struct rt_varea *varea)
{
    struct dfs_vnode *vnode = file->vnode;
    struct dfs_aspace *aspace = vnode->aspace;

    if (aspace)
    {
        rt_list_t *next;
        struct dfs_page *page;

        dfs_aspace_lock(aspace);
        if (aspace->pages_count > 0)
        {
            rt_list_for_each(next, &aspace->list_active)
            {
                if (next != &aspace->list_inactive)
                {
                    page = rt_list_entry(next, struct dfs_page, space_node);
                    if (page)
                    {
                        rt_list_t *node, *tmp;
                        struct dfs_mmap *map;

                        node = page->mmap_head.next;

                        while (node != &page->mmap_head)
                        {
                            map = rt_list_entry(node, struct dfs_mmap, mmap_node);
                            tmp = node;
                            node = node->next;

                            if (map && varea == map->varea)
                            {
                                void *vaddr = dfs_aspace_vaddr(map->varea, page->fpos);
                                rt_varea_unmap_page(map->varea, vaddr);

                                if (varea->attr == MMU_MAP_U_RWCB && page->fpos < page->aspace->vnode->size)
                                {
                                    dfs_page_dirty(page);
                                }
                                rt_list_remove(tmp);
                                rt_free(map);
                                break;
                            }
                        }
                    }
                }
            }
        }
        dfs_aspace_unlock(aspace);
    }

    return 0;
}

int dfs_aspace_page_unmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
{
    struct dfs_page *page;
    struct dfs_aspace *aspace = file->vnode->aspace;

    if (aspace)
    {
        dfs_aspace_lock(aspace);

        page = dfs_page_search(aspace, dfs_aspace_fpos(varea, vaddr));
        if (page)
        {
            rt_list_t *node, *tmp;
            struct dfs_mmap *map;

            rt_varea_unmap_page(varea, vaddr);

            node = page->mmap_head.next;

            while (node != &page->mmap_head)
            {
                map = rt_list_entry(node, struct dfs_mmap, mmap_node);
                tmp = node;
                node = node->next;

                if (map && varea == map->varea)
                {
                    if (varea->attr == MMU_MAP_U_RWCB)
                    {
                        dfs_page_dirty(page);
                    }
                    rt_list_remove(tmp);
                    rt_free(map);
                    break;
                }
            }

            dfs_page_release(page);
        }

        dfs_aspace_unlock(aspace);
    }

    return 0;
}
int dfs_aspace_page_dirty(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
{
    struct dfs_page *page;
    struct dfs_aspace *aspace = file->vnode->aspace;

    if (aspace)
    {
        dfs_aspace_lock(aspace);

        page = dfs_page_search(aspace, dfs_aspace_fpos(varea, vaddr));
        if (page)
        {
            dfs_page_dirty(page);
            dfs_page_release(page);
        }

        dfs_aspace_unlock(aspace);
    }

    return 0;
}
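
/* Convert between a virtual address inside a varea and the corresponding
 * file position, using the varea's start address and its page offset into
 * the file. */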
off_t dfs_aspace_fpos(struct rt_varea *varea, void *vaddr)
{
    return (off_t)(intptr_t)vaddr - (off_t)(intptr_t)varea->start + varea->offset * ARCH_PAGE_SIZE;
}

void *dfs_aspace_vaddr(struct rt_varea *varea, off_t fpos)
{
    return varea->start + fpos - varea->offset * ARCH_PAGE_SIZE;
}

int dfs_aspace_mmap_read(struct dfs_file *file, struct rt_varea *varea, void *data)
{
    int ret = 0;

    if (file && varea)
    {
        struct rt_aspace_io_msg *msg = (struct rt_aspace_io_msg *)data;
        if (msg)
        {
            off_t fpos = dfs_aspace_fpos(varea, msg->fault_vaddr);
            return dfs_aspace_read(file, msg->buffer_vaddr, ARCH_PAGE_SIZE, &fpos);
        }
    }

    return ret;
}

int dfs_aspace_mmap_write(struct dfs_file *file, struct rt_varea *varea, void *data)
{
    int ret = 0;

    if (file && varea)
    {
        struct rt_aspace_io_msg *msg = (struct rt_aspace_io_msg *)data;
        if (msg)
        {
            off_t fpos = dfs_aspace_fpos(varea, msg->fault_vaddr);
            return dfs_aspace_write(file, msg->buffer_vaddr, ARCH_PAGE_SIZE, &fpos);
        }
    }

    return ret;
}

#endif