virtio.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2021-11-11 GuEe-GUI the first version
  9. * 2023-10-12 fangjianzhou support SDL2
  10. */
  11. #include <rtthread.h>
  12. #include <cpuport.h>
  13. #include <virtio.h>
/* Sanity-check a virtio device handle: both the handle itself and its
 * MMIO register window must be non-NULL. Called at the top of every
 * public entry point in this file. */
rt_inline void _virtio_dev_check(struct virtio_device *dev)
{
    RT_ASSERT(dev != RT_NULL);
    RT_ASSERT(dev->mmio_config != RT_NULL);
}
/* Reset the device: writing 0 to the Status register returns the
 * device to its initial (un-negotiated) state. */
void virtio_reset_device(struct virtio_device *dev)
{
    _virtio_dev_check(dev);
    dev->mmio_config->status = 0;
}
/* First step of device initialization: set ACKNOWLEDGE (guest noticed
 * the device) and DRIVER (guest knows how to drive it) in one write. */
void virtio_status_acknowledge_driver(struct virtio_device *dev)
{
    _virtio_dev_check(dev);
    dev->mmio_config->status |= VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER;
}
/* Final step of device initialization.
 *
 * Legacy (version 1) devices do not implement the FEATURES_OK
 * handshake, so both bits are set in a single write.  Modern devices
 * require the driver to set FEATURES_OK, re-read the status register to
 * confirm the device accepted the negotiated feature set, and only then
 * set DRIVER_OK; if the device rejected the features, FAILED is set and
 * the device is left unusable. */
void virtio_status_driver_ok(struct virtio_device *dev)
{
    _virtio_dev_check(dev);
    if (dev->version == 1)
    {
        /* Legacy virtio */
        dev->mmio_config->status |= VIRTIO_STATUS_FEATURES_OK | VIRTIO_STATUS_DRIVER_OK;
    }
    else
    {
        /* Modern virtio: set FEATURES_OK and verify it */
        dev->mmio_config->status |= VIRTIO_STATUS_FEATURES_OK;
        /* Verify that device accepted the features (device clears the
         * bit if it cannot support the driver's subset) */
        if (!(dev->mmio_config->status & VIRTIO_STATUS_FEATURES_OK))
        {
            /* Device doesn't support our feature subset */
            dev->mmio_config->status |= VIRTIO_STATUS_FAILED;
            return;
        }
        /* Now set DRIVER_OK */
        dev->mmio_config->status |= VIRTIO_STATUS_DRIVER_OK;
    }
}
  52. void virtio_interrupt_ack(struct virtio_device *dev)
  53. {
  54. rt_uint32_t status;
  55. _virtio_dev_check(dev);
  56. status = dev->mmio_config->interrupt_status;
  57. if (status != 0)
  58. {
  59. dev->mmio_config->interrupt_ack = status;
  60. }
  61. }
  62. rt_bool_t virtio_has_feature(struct virtio_device *dev, rt_uint32_t feature_bit)
  63. {
  64. _virtio_dev_check(dev);
  65. if (dev->version == 1)
  66. {
  67. /* Legacy: 32-bit feature bits only */
  68. return !!(dev->mmio_config->device_features & (1UL << feature_bit));
  69. }
  70. else
  71. {
  72. /* Modern: Use 64-bit feature access */
  73. rt_uint64_t features = virtio_get_features(dev);
  74. return !!(features & (1ULL << feature_bit));
  75. }
  76. }
  77. rt_uint64_t virtio_get_features(struct virtio_device *dev)
  78. {
  79. rt_uint64_t features = 0;
  80. _virtio_dev_check(dev);
  81. if (dev->version == 1)
  82. {
  83. /* Legacy: only lower 32 bits */
  84. features = dev->mmio_config->device_features;
  85. }
  86. else
  87. {
  88. /* Modern: read both 32-bit halves */
  89. dev->mmio_config->device_features_sel = 0;
  90. features = dev->mmio_config->device_features;
  91. dev->mmio_config->device_features_sel = 1;
  92. features |= ((rt_uint64_t)dev->mmio_config->device_features) << 32;
  93. }
  94. return features;
  95. }
  96. void virtio_set_features(struct virtio_device *dev, rt_uint64_t features)
  97. {
  98. _virtio_dev_check(dev);
  99. if (dev->version == 1)
  100. {
  101. /* Legacy: only lower 32 bits */
  102. dev->mmio_config->driver_features = (rt_uint32_t)features;
  103. }
  104. else
  105. {
  106. /* Modern: write both 32-bit halves */
  107. dev->mmio_config->driver_features_sel = 0;
  108. dev->mmio_config->driver_features = (rt_uint32_t)features;
  109. dev->mmio_config->driver_features_sel = 1;
  110. dev->mmio_config->driver_features = (rt_uint32_t)(features >> 32);
  111. }
  112. }
  113. rt_bool_t virtio_has_feature_64(struct virtio_device *dev, rt_uint64_t features, rt_uint32_t feature_bit)
  114. {
  115. return !!(features & (1ULL << feature_bit));
  116. }
  117. rt_err_t virtio_queues_alloc(struct virtio_device *dev, rt_size_t queues_num)
  118. {
  119. _virtio_dev_check(dev);
  120. dev->queues = rt_malloc(sizeof(struct virtq) * queues_num);
  121. if (dev->queues != RT_NULL)
  122. {
  123. dev->queues_num = queues_num;
  124. return RT_EOK;
  125. }
  126. return -RT_ENOMEM;
  127. }
  128. void virtio_queues_free(struct virtio_device *dev)
  129. {
  130. if (dev->queues != RT_NULL)
  131. {
  132. dev->queues_num = 0;
  133. rt_free(dev->queues);
  134. }
  135. }
/* Allocate the memory for one virtqueue and register it with the
 * device.
 *
 * @param dev         virtio device (queues array must already be
 *                    allocated via virtio_queues_alloc)
 * @param queue_index which queue to set up
 * @param ring_size   number of descriptors; must be a power of two
 *
 * @return RT_EOK on success, -RT_ENOMEM if either allocation fails.
 *
 * Memory layout (legacy-compatible): descriptor table and avail ring
 * share the first page-aligned region; the used ring begins on its own
 * page boundary.
 */
rt_err_t virtio_queue_init(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t ring_size)
{
    int i;
    void *pages;
    rt_size_t pages_total_size;
    struct virtq *queue;
    rt_uint64_t desc_addr, avail_addr, used_addr;

    _virtio_dev_check(dev);

    RT_ASSERT(dev->mmio_config->queue_num_max > 0);
    RT_ASSERT(dev->mmio_config->queue_num_max > queue_index);

    /* ring_size is power of 2 */
    RT_ASSERT(ring_size > 0);
    RT_ASSERT(((ring_size - 1) & ring_size) == 0);

    queue = &dev->queues[queue_index];

    /* desc + avail are padded up to a page; used ring follows on the
     * next page boundary */
    pages_total_size = VIRTIO_PAGE_ALIGN(
            VIRTQ_DESC_TOTAL_SIZE(ring_size) + VIRTQ_AVAIL_TOTAL_SIZE(ring_size)) + VIRTQ_USED_TOTAL_SIZE(ring_size);

    /* The whole area must be page-aligned for the legacy PFN interface */
    pages = rt_malloc_align(pages_total_size, VIRTIO_PAGE_SIZE);

    if (pages == RT_NULL)
    {
        return -RT_ENOMEM;
    }

    /* Per-descriptor "is free" bookkeeping, driver-side only */
    queue->free = rt_malloc(sizeof(rt_bool_t) * ring_size);

    if (queue->free == RT_NULL)
    {
        rt_free_align(pages);
        return -RT_ENOMEM;
    }

    /* The device expects the ring memory zeroed before use */
    rt_memset(pages, 0, pages_total_size);

    /* Set queue selector before touching any per-queue registers */
    dev->mmio_config->queue_sel = queue_index;
    dev->mmio_config->queue_num = ring_size;

    /* Calculate queue area addresses */
    queue->num = ring_size;
    queue->desc = (struct virtq_desc *)((rt_ubase_t)pages);
    queue->avail = (struct virtq_avail *)(((rt_ubase_t)pages) + VIRTQ_DESC_TOTAL_SIZE(ring_size));
    /* used ring starts at the next page boundary after the avail ring
     * (including its trailing used_event field) */
    queue->used = (struct virtq_used *)VIRTIO_PAGE_ALIGN(
            (rt_ubase_t)&queue->avail->ring[ring_size] + VIRTQ_AVAIL_RES_SIZE);

    /* The device needs physical addresses */
    desc_addr = VIRTIO_VA2PA(queue->desc);
    avail_addr = VIRTIO_VA2PA(queue->avail);
    used_addr = VIRTIO_VA2PA(queue->used);

    if (dev->version == 1)
    {
        /* Legacy virtio: publish the whole area as a single page frame
         * number; avail/used locations are implied by the layout above */
        dev->mmio_config->guest_page_size = VIRTIO_PAGE_SIZE;
        dev->mmio_config->queue_align = VIRTIO_PAGE_SIZE;
        dev->mmio_config->queue_pfn = desc_addr >> VIRTIO_PAGE_SHIFT;
    }
    else
    {
        /* Modern virtio: use separate descriptor/driver/device registers */
        dev->mmio_config->queue_desc_low = (rt_uint32_t)desc_addr;
        dev->mmio_config->queue_desc_high = (rt_uint32_t)(desc_addr >> 32);
        dev->mmio_config->queue_driver_low = (rt_uint32_t)avail_addr;
        dev->mmio_config->queue_driver_high = (rt_uint32_t)(avail_addr >> 32);
        dev->mmio_config->queue_device_low = (rt_uint32_t)used_addr;
        dev->mmio_config->queue_device_high = (rt_uint32_t)(used_addr >> 32);

        /* Enable the queue only after all addresses are programmed */
        dev->mmio_config->queue_ready = 1;
    }

    queue->used_idx = 0;

    /* All descriptors start out unused */
    for (i = 0; i < ring_size; ++i)
    {
        queue->free[i] = RT_TRUE;
    }
    queue->free_count = ring_size;

    return RT_EOK;
}
  204. void virtio_queue_destroy(struct virtio_device *dev, rt_uint32_t queue_index)
  205. {
  206. struct virtq *queue;
  207. _virtio_dev_check(dev);
  208. RT_ASSERT(dev->mmio_config->queue_num_max > 0);
  209. RT_ASSERT(dev->mmio_config->queue_num_max > queue_index);
  210. queue = &dev->queues[queue_index];
  211. RT_ASSERT(queue->num > 0);
  212. rt_free(queue->free);
  213. rt_free_align((void *)queue->desc);
  214. dev->mmio_config->queue_sel = queue_index;
  215. if (dev->version == 1)
  216. {
  217. /* Legacy virtio */
  218. dev->mmio_config->queue_pfn = 0;
  219. }
  220. else
  221. {
  222. /* Modern virtio */
  223. dev->mmio_config->queue_ready = 0;
  224. }
  225. queue->num = 0;
  226. queue->desc = RT_NULL;
  227. queue->avail = RT_NULL;
  228. queue->used = RT_NULL;
  229. }
/* Kick the device: tell it that queue `queue_index` has new buffers.
 * Callers must publish the avail ring update (virtio_submit_chain,
 * which issues the barriers) before notifying. */
void virtio_queue_notify(struct virtio_device *dev, rt_uint32_t queue_index)
{
    _virtio_dev_check(dev);
    dev->mmio_config->queue_notify = queue_index;
}
/* Publish a filled descriptor chain to the device via the avail ring.
 *
 * @param desc_index head descriptor of the chain (from
 *                   virtio_alloc_desc / virtio_alloc_desc_chain,
 *                   filled via virtio_fill_desc)
 *
 * The two barriers are required: the ring entry must be visible before
 * the index increment, and the index increment must be visible before
 * any subsequent notify. */
void virtio_submit_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
{
    rt_size_t ring_size;
    struct virtq *queue;

    _virtio_dev_check(dev);

    queue = &dev->queues[queue_index];
    ring_size = queue->num;

    /* Tell the device the first index in our chain of descriptors */
    queue->avail->ring[queue->avail->idx % ring_size] = desc_index;
    rt_hw_dsb();

    /* Tell the device another avail ring entry is available */
    queue->avail->idx++;
    rt_hw_dsb();
}
  249. rt_uint16_t virtio_alloc_desc(struct virtio_device *dev, rt_uint32_t queue_index)
  250. {
  251. int i;
  252. struct virtq *queue;
  253. _virtio_dev_check(dev);
  254. RT_ASSERT(queue_index < dev->queues_num);
  255. queue = &dev->queues[queue_index];
  256. if (queue->free_count > 0)
  257. {
  258. rt_size_t ring_size = queue->num;
  259. for (i = 0; i < ring_size; ++i)
  260. {
  261. if (queue->free[i])
  262. {
  263. queue->free[i] = RT_FALSE;
  264. queue->free_count--;
  265. return (rt_uint16_t)i;
  266. }
  267. }
  268. }
  269. return VIRTQ_INVALID_DESC_ID;
  270. }
  271. void virtio_free_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
  272. {
  273. struct virtq *queue;
  274. _virtio_dev_check(dev);
  275. queue = &dev->queues[queue_index];
  276. RT_ASSERT(queue_index < dev->queues_num);
  277. RT_ASSERT(!queue->free[desc_index]);
  278. queue->desc[desc_index].addr = 0;
  279. queue->desc[desc_index].len = 0;
  280. queue->desc[desc_index].flags = 0;
  281. queue->desc[desc_index].next = 0;
  282. queue->free[desc_index] = RT_TRUE;
  283. queue->free_count++;
  284. }
  285. rt_err_t virtio_alloc_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t count,
  286. rt_uint16_t *indexs)
  287. {
  288. int i, j;
  289. _virtio_dev_check(dev);
  290. RT_ASSERT(indexs != RT_NULL);
  291. if (dev->queues[queue_index].free_count < count)
  292. {
  293. return -RT_ERROR;
  294. }
  295. for (i = 0; i < count; ++i)
  296. {
  297. indexs[i] = virtio_alloc_desc(dev, queue_index);
  298. if (indexs[i] == VIRTQ_INVALID_DESC_ID)
  299. {
  300. for (j = 0; j < i; ++j)
  301. {
  302. virtio_free_desc(dev, queue_index, indexs[j]);
  303. }
  304. return -RT_ERROR;
  305. }
  306. }
  307. return RT_EOK;
  308. }
  309. void virtio_free_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
  310. {
  311. rt_uint16_t flags, next;
  312. struct virtq_desc *desc;
  313. _virtio_dev_check(dev);
  314. desc = &dev->queues[queue_index].desc[0];
  315. for (;;)
  316. {
  317. flags = desc[desc_index].flags;
  318. next = desc[desc_index].next;
  319. virtio_free_desc(dev, queue_index, desc_index);
  320. if (flags & VIRTQ_DESC_F_NEXT)
  321. {
  322. desc_index = next;
  323. }
  324. else
  325. {
  326. break;
  327. }
  328. }
  329. }
  330. void virtio_fill_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index,
  331. rt_uint64_t addr, rt_uint32_t len, rt_uint16_t flags, rt_uint16_t next)
  332. {
  333. struct virtq_desc *desc;
  334. _virtio_dev_check(dev);
  335. desc = &dev->queues[queue_index].desc[desc_index];
  336. desc->addr = addr;
  337. desc->len = len;
  338. desc->flags = flags;
  339. desc->next = next;
  340. }
  341. #ifdef RT_USING_SMART
  342. #ifdef RT_USING_VIRTIO_GPU
  343. #include <virtio_gpu.h>
  344. #include "drivers/lcd.h"
  345. #include <dfs_file.h>
  346. #include <lwp_user_mm.h>
/* Cached GPU state for the "fb0" framebuffer shim. */
static struct rt_device_graphic_info _graphic_info; /* filled by RTGRAPHIC_CTRL_GET_INFO */
static struct rt_device_rect_info _rect_info;       /* full-screen rect used for updates */
static struct rt_device _fb = {};                   /* the registered "fb0" device */
static rt_device_t _gpu_dev = RT_NULL;              /* underlying "virtio-gpu0" device */
/* Framebuffer open hook: nothing to do, the GPU device is opened once
 * at init time. */
static rt_err_t fb_open(rt_device_t dev, rt_uint16_t oflag)
{
    return RT_EOK;
}
/* Framebuffer close hook: no per-open state to release. */
static rt_err_t fb_close(rt_device_t dev)
{
    return RT_EOK;
}
/* ioctl-style control handler for the "fb0" framebuffer device.
 *
 * Supported commands:
 *   FBIOPAN_DISPLAY    - flush the CPU cache over the framebuffer and
 *                        ask the GPU to redraw the full-screen rect
 *   FBIOGET_FSCREENINFO - report fixed screen info (id, smem_len)
 *   FBIOGET_VSCREENINFO - report variable screen info (geometry and
 *                        RGBA channel layout)
 *   RT_FIOMMAP2        - map the framebuffer's physical pages into the
 *                        calling user process
 * Unknown commands return RT_EOK unchanged.
 */
static rt_err_t fb_control(rt_device_t dev, int cmd, void *args)
{
    switch(cmd)
    {
    case FBIOPAN_DISPLAY:
    {
        /* Clean the dcache first so the GPU sees the latest pixels */
        rt_hw_cpu_dcache_clean(_graphic_info.framebuffer, _graphic_info.smem_len);
        rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_RECT_UPDATE, &_rect_info);
        break;
    }
    case FBIOGET_FSCREENINFO:
    {
        struct fb_fix_screeninfo *info = (struct fb_fix_screeninfo *)args;
        /* "lcd" (4 bytes incl. NUL) fits in info->id, so strncpy
         * terminates here */
        strncpy(info->id, "lcd", sizeof(info->id));
        info->smem_len = _graphic_info.smem_len;
        break;
    }
    case FBIOGET_VSCREENINFO:
    {
        struct fb_var_screeninfo *info = (struct fb_var_screeninfo *)args;
        info->bits_per_pixel = _graphic_info.bits_per_pixel;
        info->xres = _graphic_info.width;
        info->yres = _graphic_info.height;
        /* No virtual panning: virtual size equals visible size */
        info->yres_virtual = _graphic_info.height;
        info->xres_virtual = _graphic_info.width;
        /* Channel layout: R at bit 0, G at 8, B at 16, alpha at 24
         * (matches the RGB888 surface created in fb_init) */
        info->transp.offset = 24;
        info->transp.length = 8;
        info->red.offset = 0;
        info->red.length = 8;
        info->green.offset = 8;
        info->green.length = 8;
        info->blue.offset = 16;
        info->blue.length = 8;
        break;
    }
    case RT_FIOMMAP2:
    {
        struct dfs_mmap2_args *mmap2 = (struct dfs_mmap2_args *)args;
        if(mmap2)
        {
            /* Map the framebuffer's physical memory into the caller's
             * address space; mmap2->ret carries the user VA back */
            mmap2->ret = lwp_map_user_phy(lwp_self(), RT_NULL, rt_kmem_v2p(_graphic_info.framebuffer), mmap2->length, 1);
        }
        else
        {
            return -EIO;
        }
        break;
    }
    default:
        break;
    }

    return RT_EOK;
}
#ifdef RT_USING_DEVICE_OPS
/* Device operations table for "fb0"; unused hooks are RT_NULL. */
const static struct rt_device_ops fb_ops =
{
    RT_NULL,    /* init */
    fb_open,    /* open */
    fb_close,   /* close */
    RT_NULL,    /* read */
    RT_NULL,    /* write */
    fb_control  /* control */
};
#endif
  423. static int fb_init()
  424. {
  425. _gpu_dev = rt_device_find("virtio-gpu0");
  426. if(_gpu_dev == RT_NULL)
  427. {
  428. return -RT_ERROR;
  429. }
  430. if(_gpu_dev != RT_NULL && rt_device_open(_gpu_dev, 0) == RT_EOK)
  431. {
  432. rt_memset(&_graphic_info, 0, sizeof(_graphic_info));
  433. rt_memset(&_rect_info, 0, sizeof(_rect_info));
  434. rt_device_control(_gpu_dev, VIRTIO_DEVICE_CTRL_GPU_SET_PRIMARY, RT_NULL);
  435. rt_device_control(_gpu_dev, VIRTIO_DEVICE_CTRL_GPU_CREATE_2D, (void *)RTGRAPHIC_PIXEL_FORMAT_RGB888);
  436. rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_GET_INFO, &_graphic_info);
  437. _rect_info.x = 0;
  438. _rect_info.y = 0;
  439. _rect_info.width = _graphic_info.width;
  440. _rect_info.height = _graphic_info.height;
  441. memset(_graphic_info.framebuffer, 0xff, _graphic_info.smem_len);
  442. rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_RECT_UPDATE, &_rect_info);
  443. }
  444. if(rt_device_find("fb0") != RT_NULL)
  445. {
  446. rt_kprintf("a device named fb0 already exists\n");
  447. return -RT_ERROR;
  448. }
  449. _fb.type = RT_Device_Class_Miscellaneous;
  450. #ifdef RT_USING_DEVICE_OPS
  451. _fb.ops = &fb_ops;
  452. #else
  453. _fb.init = RT_NULL;
  454. _fb.open = fb_open;
  455. _fb.close = fb_close;
  456. _fb.read = RT_NULL;
  457. _fb.write = RT_NULL;
  458. _fb.control = fb_control;
  459. _fb.user_data = RT_NULL;
  460. #endif
  461. rt_device_register(&_fb, "fb0", RT_DEVICE_FLAG_RDWR);
  462. return RT_EOK;
  463. }
  464. INIT_COMPONENT_EXPORT(fb_init);
  465. #endif
  466. #endif