virtio.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2021-11-11 GuEe-GUI the first version
  9. * 2023-10-12 fangjianzhou support SDL2
  10. */
  11. #include <rtthread.h>
  12. #include <cpuport.h>
  13. #include <virtio.h>
  14. rt_inline void _virtio_dev_check(struct virtio_device *dev)
  15. {
  16. RT_ASSERT(dev != RT_NULL);
  17. RT_ASSERT(dev->mmio_config != RT_NULL);
  18. }
  19. void virtio_reset_device(struct virtio_device *dev)
  20. {
  21. _virtio_dev_check(dev);
  22. dev->mmio_config->status = 0;
  23. }
  24. void virtio_status_acknowledge_driver(struct virtio_device *dev)
  25. {
  26. _virtio_dev_check(dev);
  27. dev->mmio_config->status |= VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER;
  28. }
  29. void virtio_status_driver_ok(struct virtio_device *dev)
  30. {
  31. _virtio_dev_check(dev);
  32. dev->mmio_config->status |= VIRTIO_STATUS_FEATURES_OK | VIRTIO_STATUS_DRIVER_OK;
  33. }
  34. void virtio_interrupt_ack(struct virtio_device *dev)
  35. {
  36. rt_uint32_t status;
  37. _virtio_dev_check(dev);
  38. status = dev->mmio_config->interrupt_status;
  39. if (status != 0)
  40. {
  41. dev->mmio_config->interrupt_ack = status;
  42. }
  43. }
  44. rt_bool_t virtio_has_feature(struct virtio_device *dev, rt_uint32_t feature_bit)
  45. {
  46. _virtio_dev_check(dev);
  47. return !!(dev->mmio_config->device_features & (1UL << feature_bit));
  48. }
  49. rt_err_t virtio_queues_alloc(struct virtio_device *dev, rt_size_t queues_num)
  50. {
  51. _virtio_dev_check(dev);
  52. dev->queues = rt_malloc(sizeof(struct virtq) * queues_num);
  53. if (dev->queues != RT_NULL)
  54. {
  55. dev->queues_num = queues_num;
  56. return RT_EOK;
  57. }
  58. return -RT_ENOMEM;
  59. }
  60. void virtio_queues_free(struct virtio_device *dev)
  61. {
  62. if (dev->queues != RT_NULL)
  63. {
  64. dev->queues_num = 0;
  65. rt_free(dev->queues);
  66. }
  67. }
/**
 * Set up one virtqueue using the legacy virtio-MMIO interface.
 *
 * @param dev          validated virtio device
 * @param queue_index  which queue to configure (must be < dev->queues_num)
 * @param ring_size    number of descriptors; must be a nonzero power of two
 *                     and no larger than the device's queue_num_max
 *
 * @return RT_EOK on success, -RT_ENOMEM if ring or free-map allocation fails.
 */
rt_err_t virtio_queue_init(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t ring_size)
{
    int i;
    void *pages;
    rt_size_t pages_total_size;
    struct virtq *queue;

    _virtio_dev_check(dev);

    RT_ASSERT(queue_index < dev->queues_num);
    /* ring_size is power of 2 */
    RT_ASSERT(ring_size > 0);
    RT_ASSERT(((ring_size - 1) & ring_size) == 0);

    /* Select the queue first, then read queue_num_max */
    dev->mmio_config->queue_sel = queue_index;
    RT_ASSERT(dev->mmio_config->queue_num_max > 0);
    RT_ASSERT(ring_size <= dev->mmio_config->queue_num_max);

    queue = &dev->queues[queue_index];

    /* Legacy layout: descriptor table + avail ring first, then the used
     * ring starting on the next page boundary. */
    pages_total_size = VIRTIO_PAGE_ALIGN(
            VIRTQ_DESC_TOTAL_SIZE(ring_size) + VIRTQ_AVAIL_TOTAL_SIZE(ring_size)) + VIRTQ_USED_TOTAL_SIZE(ring_size);

    /* Page-aligned so the physical frame number below is exact. */
    pages = rt_malloc_align(pages_total_size, VIRTIO_PAGE_SIZE);

    if (pages == RT_NULL)
    {
        return -RT_ENOMEM;
    }

    /* Driver-side map of which descriptors are unused. */
    queue->free = rt_malloc(sizeof(rt_bool_t) * ring_size);

    if (queue->free == RT_NULL)
    {
        rt_free_align(pages);
        return -RT_ENOMEM;
    }

    /* The spec requires the ring memory to be zeroed before use. */
    rt_memset(pages, 0, pages_total_size);

    /* Program the legacy MMIO registers; the PFN write hands the ring
     * to the device, so it must come after size/alignment are set. */
    dev->mmio_config->guest_page_size = VIRTIO_PAGE_SIZE;
    dev->mmio_config->queue_num = ring_size;
    dev->mmio_config->queue_align = VIRTIO_PAGE_SIZE;
    dev->mmio_config->queue_pfn = VIRTIO_VA2PA(pages) >> VIRTIO_PAGE_SHIFT;

    queue->num = ring_size;
    queue->desc = (struct virtq_desc *)((rt_ubase_t)pages);
    queue->avail = (struct virtq_avail *)(((rt_ubase_t)pages) + VIRTQ_DESC_TOTAL_SIZE(ring_size));
    /* Used ring lives on the first page boundary past the avail ring. */
    queue->used = (struct virtq_used *)VIRTIO_PAGE_ALIGN(
            (rt_ubase_t)&queue->avail->ring[ring_size] + VIRTQ_AVAIL_RES_SIZE);

    queue->used_idx = 0;

    /* All descriptors start out unused */
    for (i = 0; i < ring_size; ++i)
    {
        queue->free[i] = RT_TRUE;
    }
    queue->free_count = ring_size;

    return RT_EOK;
}
  116. void virtio_queue_destroy(struct virtio_device *dev, rt_uint32_t queue_index)
  117. {
  118. struct virtq *queue;
  119. _virtio_dev_check(dev);
  120. RT_ASSERT(queue_index < dev->queues_num);
  121. /* Select the queue first, then read queue_num_max */
  122. dev->mmio_config->queue_sel = queue_index;
  123. RT_ASSERT(dev->mmio_config->queue_num_max > 0);
  124. queue = &dev->queues[queue_index];
  125. RT_ASSERT(queue->num > 0);
  126. rt_free(queue->free);
  127. rt_free_align((void *)queue->desc);
  128. dev->mmio_config->queue_pfn = RT_NULL;
  129. queue->num = 0;
  130. queue->desc = RT_NULL;
  131. queue->avail = RT_NULL;
  132. queue->used = RT_NULL;
  133. }
  134. void virtio_queue_notify(struct virtio_device *dev, rt_uint32_t queue_index)
  135. {
  136. _virtio_dev_check(dev);
  137. dev->mmio_config->queue_notify = queue_index;
  138. }
  139. void virtio_submit_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
  140. {
  141. rt_size_t ring_size;
  142. struct virtq *queue;
  143. _virtio_dev_check(dev);
  144. queue = &dev->queues[queue_index];
  145. ring_size = queue->num;
  146. /* Tell the device the first index in our chain of descriptors */
  147. queue->avail->ring[queue->avail->idx % ring_size] = desc_index;
  148. rt_hw_dsb();
  149. /* Tell the device another avail ring entry is available */
  150. queue->avail->idx++;
  151. rt_hw_dsb();
  152. }
  153. rt_uint16_t virtio_alloc_desc(struct virtio_device *dev, rt_uint32_t queue_index)
  154. {
  155. int i;
  156. struct virtq *queue;
  157. _virtio_dev_check(dev);
  158. RT_ASSERT(queue_index < dev->queues_num);
  159. queue = &dev->queues[queue_index];
  160. if (queue->free_count > 0)
  161. {
  162. rt_size_t ring_size = queue->num;
  163. for (i = 0; i < ring_size; ++i)
  164. {
  165. if (queue->free[i])
  166. {
  167. queue->free[i] = RT_FALSE;
  168. queue->free_count--;
  169. return (rt_uint16_t)i;
  170. }
  171. }
  172. }
  173. return VIRTQ_INVALID_DESC_ID;
  174. }
  175. void virtio_free_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
  176. {
  177. struct virtq *queue;
  178. _virtio_dev_check(dev);
  179. queue = &dev->queues[queue_index];
  180. RT_ASSERT(queue_index < dev->queues_num);
  181. RT_ASSERT(!queue->free[desc_index]);
  182. queue->desc[desc_index].addr = 0;
  183. queue->desc[desc_index].len = 0;
  184. queue->desc[desc_index].flags = 0;
  185. queue->desc[desc_index].next = 0;
  186. queue->free[desc_index] = RT_TRUE;
  187. queue->free_count++;
  188. }
  189. rt_err_t virtio_alloc_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t count,
  190. rt_uint16_t *indexs)
  191. {
  192. int i, j;
  193. _virtio_dev_check(dev);
  194. RT_ASSERT(indexs != RT_NULL);
  195. if (dev->queues[queue_index].free_count < count)
  196. {
  197. return -RT_ERROR;
  198. }
  199. for (i = 0; i < count; ++i)
  200. {
  201. indexs[i] = virtio_alloc_desc(dev, queue_index);
  202. if (indexs[i] == VIRTQ_INVALID_DESC_ID)
  203. {
  204. for (j = 0; j < i; ++j)
  205. {
  206. virtio_free_desc(dev, queue_index, indexs[j]);
  207. }
  208. return -RT_ERROR;
  209. }
  210. }
  211. return RT_EOK;
  212. }
  213. void virtio_free_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
  214. {
  215. rt_uint16_t flags, next;
  216. struct virtq_desc *desc;
  217. _virtio_dev_check(dev);
  218. desc = &dev->queues[queue_index].desc[0];
  219. for (;;)
  220. {
  221. flags = desc[desc_index].flags;
  222. next = desc[desc_index].next;
  223. virtio_free_desc(dev, queue_index, desc_index);
  224. if (flags & VIRTQ_DESC_F_NEXT)
  225. {
  226. desc_index = next;
  227. }
  228. else
  229. {
  230. break;
  231. }
  232. }
  233. }
  234. void virtio_fill_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index,
  235. rt_uint64_t addr, rt_uint32_t len, rt_uint16_t flags, rt_uint16_t next)
  236. {
  237. struct virtq_desc *desc;
  238. _virtio_dev_check(dev);
  239. desc = &dev->queues[queue_index].desc[desc_index];
  240. desc->addr = addr;
  241. desc->len = len;
  242. desc->flags = flags;
  243. desc->next = next;
  244. }
  245. #ifdef RT_USING_SMART
  246. #ifdef RT_USING_VIRTIO_GPU
  247. #include <virtio_gpu.h>
  248. #include "drivers/lcd.h"
  249. #include <dfs_file.h>
  250. #include <lwp_user_mm.h>
/* Graphic info queried from the virtio-gpu device (framebuffer, size, bpp). */
static struct rt_device_graphic_info _graphic_info;
/* Full-screen rectangle handed to RTGRAPHIC_CTRL_RECT_UPDATE on flush. */
static struct rt_device_rect_info _rect_info;
/* The "fb0" wrapper device registered by fb_init(). */
static struct rt_device _fb = {};
/* Handle of the backing "virtio-gpu0" device. */
static rt_device_t _gpu_dev = RT_NULL;
  255. static rt_err_t fb_open(rt_device_t dev, rt_uint16_t oflag)
  256. {
  257. return RT_EOK;
  258. }
  259. static rt_err_t fb_close(rt_device_t dev)
  260. {
  261. return RT_EOK;
  262. }
  263. static rt_err_t fb_control(rt_device_t dev, int cmd, void *args)
  264. {
  265. switch(cmd)
  266. {
  267. case FBIOPAN_DISPLAY:
  268. {
  269. rt_hw_cpu_dcache_clean(_graphic_info.framebuffer, _graphic_info.smem_len);
  270. rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_RECT_UPDATE, &_rect_info);
  271. break;
  272. }
  273. case FBIOGET_FSCREENINFO:
  274. {
  275. struct fb_fix_screeninfo *info = (struct fb_fix_screeninfo *)args;
  276. strncpy(info->id, "lcd", sizeof(info->id));
  277. info->smem_len = _graphic_info.smem_len;
  278. break;
  279. }
  280. case FBIOGET_VSCREENINFO:
  281. {
  282. struct fb_var_screeninfo *info = (struct fb_var_screeninfo *)args;
  283. info->bits_per_pixel = _graphic_info.bits_per_pixel;
  284. info->xres = _graphic_info.width;
  285. info->yres = _graphic_info.height;
  286. info->yres_virtual = _graphic_info.height;
  287. info->xres_virtual = _graphic_info.width;
  288. info->transp.offset = 24;
  289. info->transp.length = 8;
  290. info->red.offset = 0;
  291. info->red.length = 8;
  292. info->green.offset = 8;
  293. info->green.length = 8;
  294. info->blue.offset = 16;
  295. info->blue.length = 8;
  296. break;
  297. }
  298. case RT_FIOMMAP2:
  299. {
  300. struct dfs_mmap2_args *mmap2 = (struct dfs_mmap2_args *)args;
  301. if(mmap2)
  302. {
  303. mmap2->ret = lwp_map_user_phy(lwp_self(), RT_NULL, rt_kmem_v2p(_graphic_info.framebuffer), mmap2->length, 1);
  304. }
  305. else
  306. {
  307. return -EIO;
  308. }
  309. break;
  310. }
  311. default:
  312. break;
  313. }
  314. return RT_EOK;
  315. }
#ifdef RT_USING_DEVICE_OPS
/* Device operation table for "fb0": only open/close/control are provided. */
const static struct rt_device_ops fb_ops =
{
    RT_NULL,    /* init */
    fb_open,    /* open */
    fb_close,   /* close */
    RT_NULL,    /* read */
    RT_NULL,    /* write */
    fb_control  /* control */
};
#endif
  327. static int fb_init()
  328. {
  329. _gpu_dev = rt_device_find("virtio-gpu0");
  330. if(_gpu_dev == RT_NULL)
  331. {
  332. return -RT_ERROR;
  333. }
  334. if(_gpu_dev != RT_NULL && rt_device_open(_gpu_dev, 0) == RT_EOK)
  335. {
  336. rt_memset(&_graphic_info, 0, sizeof(_graphic_info));
  337. rt_memset(&_rect_info, 0, sizeof(_rect_info));
  338. rt_device_control(_gpu_dev, VIRTIO_DEVICE_CTRL_GPU_SET_PRIMARY, RT_NULL);
  339. rt_device_control(_gpu_dev, VIRTIO_DEVICE_CTRL_GPU_CREATE_2D, (void *)RTGRAPHIC_PIXEL_FORMAT_RGB888);
  340. rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_GET_INFO, &_graphic_info);
  341. _rect_info.x = 0;
  342. _rect_info.y = 0;
  343. _rect_info.width = _graphic_info.width;
  344. _rect_info.height = _graphic_info.height;
  345. memset(_graphic_info.framebuffer, 0xff, _graphic_info.smem_len);
  346. rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_RECT_UPDATE, &_rect_info);
  347. }
  348. if(rt_device_find("fb0") != RT_NULL)
  349. {
  350. rt_kprintf("a device named fb0 already exists\n");
  351. return -RT_ERROR;
  352. }
  353. _fb.type = RT_Device_Class_Miscellaneous;
  354. #ifdef RT_USING_DEVICE_OPS
  355. _fb.ops = &fb_ops;
  356. #else
  357. _fb.init = RT_NULL;
  358. _fb.open = fb_open;
  359. _fb.close = fb_close;
  360. _fb.read = RT_NULL;
  361. _fb.write = RT_NULL;
  362. _fb.control = fb_control;
  363. _fb.user_data = RT_NULL;
  364. #endif
  365. rt_device_register(&_fb, "fb0", RT_DEVICE_FLAG_RDWR);
  366. return RT_EOK;
  367. }
  368. INIT_COMPONENT_EXPORT(fb_init);
  369. #endif
  370. #endif