mfd-edu.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-02-25     GuEe-GUI     the first version
 */

#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>

#define DBG_TAG "mfd.edu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <cpuport.h>
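
/*
 * Register map of QEMU's "edu" educational PCI device, exposed through
 * BAR 0 (the layout below follows this driver's usage; see QEMU's edu
 * device specification for the authoritative description).
 */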
#define PCI_EDU_REGS_BAR        0

#define EDU_REG_VERSION         0x00
#define EDU_REG_CARD_LIVENESS   0x04
#define EDU_REG_VALUE           0x08
#define EDU_REG_STATUS          0x20
#define EDU_REG_STATUS_IRQ      0x80
#define EDU_REG_IRQ_STATUS      0x24
#define EDU_REG_ISR_FACT        0x00000001
#define EDU_REG_ISR_DMA         0x00000100
#define EDU_REG_IRQ_RAISE       0x60
#define EDU_REG_IRQ_ACK         0x64
#define EDU_REG_DMA_SRC         0x80
#define EDU_REG_DMA_DST         0x88
#define EDU_REG_DMA_SIZE        0x90
#define EDU_REG_DMA_CMD         0x98
#define EDU_DMA_CMD_RUN         0x1
#define EDU_DMA_CMD_TO_PCI      0x0
#define EDU_DMA_CMD_FROM_PCI    0x2
#define EDU_DMA_CMD_IRQ         0x4

#define EDU_FACTORIAL_ACK       0x00000001
#define EDU_DMA_ACK             0x00000100
#define EDU_DMA_FREE            (~0UL)
#define EDU_DMA_BASE            0x40000
#define EDU_DMA_SIZE            ((rt_size_t)(4096 - 1))
#define EDU_DMA_POLL_SIZE       128
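
/*
 * Per-device state: the character device front-end, the DMA controller
 * exported to the DMA framework, the mapped BAR 0 registers, the pending
 * interrupt acknowledge value, and the lock/completion used to serialize
 * requests and wait for the device interrupt.
 */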
struct edu_device
{
    struct rt_device parent;

    struct rt_dma_controller dma_ctrl;

    void *regs;
    rt_uint32_t ack;
    rt_bool_t dma_work;

    struct rt_mutex lock;
    struct rt_completion done;
};

#define raw_to_edu_device(raw) rt_container_of(raw, struct edu_device, parent)
#define raw_to_edu_dma(raw) rt_container_of(raw, struct edu_device, dma_ctrl)

rt_inline rt_uint32_t edu_readl(struct edu_device *edu, int offset)
{
    return HWREG32(edu->regs + offset);
}

rt_inline void edu_writel(struct edu_device *edu, int offset, rt_uint32_t value)
{
    HWREG32(edu->regs + offset) = value;
}
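
/*
 * Run a memory-to-memory transfer by bouncing data through the edu card's
 * internal buffer at EDU_DMA_BASE: each chunk (at most EDU_DMA_SIZE bytes)
 * is first copied from the source address into the card, then from the card
 * to the destination address. Chunks larger than EDU_DMA_POLL_SIZE wait for
 * the DMA-done interrupt; smaller ones simply poll the RUN bit.
 */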
static rt_err_t edu_dma_start(struct rt_dma_chan *chan)
{
    rt_size_t len;
    rt_ubase_t dma_addr_src, dma_addr_dst;
    struct edu_device *edu = raw_to_edu_dma(chan->ctrl);

    rt_mutex_take(&edu->lock, RT_WAITING_FOREVER);

    edu->ack = EDU_DMA_ACK;
    edu->dma_work = RT_TRUE;

    len = chan->transfer.buffer_len;
    dma_addr_src = chan->transfer.src_addr;
    dma_addr_dst = chan->transfer.dst_addr;

    while ((rt_ssize_t)len > 0 && edu->dma_work)
    {
        rt_uint32_t cmd = EDU_DMA_CMD_RUN;
        rt_uint32_t blen = rt_min_t(rt_size_t, EDU_DMA_SIZE, len);

        if (blen > EDU_DMA_POLL_SIZE)
        {
            cmd |= EDU_DMA_CMD_IRQ;
        }

        edu_writel(edu, EDU_REG_DMA_SRC, dma_addr_src);
        edu_writel(edu, EDU_REG_DMA_DST, EDU_DMA_BASE);
        edu_writel(edu, EDU_REG_DMA_SIZE, blen);
        edu_writel(edu, EDU_REG_DMA_CMD, cmd | EDU_DMA_CMD_TO_PCI);

        if (cmd & EDU_DMA_CMD_IRQ)
        {
            rt_completion_wait(&edu->done, RT_WAITING_FOREVER);
        }
        else
        {
            while (edu_readl(edu, EDU_REG_DMA_CMD) & EDU_DMA_CMD_RUN)
            {
                rt_hw_cpu_relax();
            }
        }

        edu_writel(edu, EDU_REG_DMA_SRC, EDU_DMA_BASE);
        edu_writel(edu, EDU_REG_DMA_DST, dma_addr_dst);
        edu_writel(edu, EDU_REG_DMA_SIZE, blen);
        edu_writel(edu, EDU_REG_DMA_CMD, cmd | EDU_DMA_CMD_FROM_PCI);

        if (cmd & EDU_DMA_CMD_IRQ)
        {
            rt_completion_wait(&edu->done, RT_WAITING_FOREVER);
        }
        else
        {
            while (edu_readl(edu, EDU_REG_DMA_CMD) & EDU_DMA_CMD_RUN)
            {
                rt_hw_cpu_relax();
            }
        }

        len -= blen;
        dma_addr_src += blen;
        dma_addr_dst += blen;
    }

    rt_mutex_release(&edu->lock);

    rt_dma_chan_done(chan, chan->transfer.buffer_len - len);

    return RT_EOK;
}

static rt_err_t edu_dma_stop(struct rt_dma_chan *chan)
{
    struct edu_device *edu = raw_to_edu_dma(chan->ctrl);

    edu->dma_work = RT_FALSE;

    return RT_EOK;
}

static rt_err_t edu_dma_config(struct rt_dma_chan *chan,
        struct rt_dma_slave_config *conf)
{
    return RT_EOK;
}

static rt_err_t edu_dma_prep_memcpy(struct rt_dma_chan *chan,
        rt_ubase_t dma_addr_src, rt_ubase_t dma_addr_dst, rt_size_t len)
{
    return RT_EOK;
}

const static struct rt_dma_controller_ops edu_dma_ops =
{
    .start = edu_dma_start,
    .stop = edu_dma_stop,
    .config = edu_dma_config,
    .prep_memcpy = edu_dma_prep_memcpy,
};
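
/* Read back the value register, which holds the result of the last computation. */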
static rt_ssize_t edu_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    rt_uint32_t number;
    struct edu_device *edu = raw_to_edu_device(dev);

    rt_mutex_take(&edu->lock, RT_WAITING_FOREVER);
    number = edu_readl(edu, EDU_REG_VALUE);
    rt_mutex_release(&edu->lock);

    rt_memcpy(buffer, &number, rt_min(sizeof(number), size));

    return rt_min(sizeof(number), size);
}
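
/*
 * Start a computation: enable the "raise IRQ when done" status bit, write
 * the operand into the value register and wait for the factorial interrupt
 * before returning. The result can then be fetched with edu_read().
 */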
static rt_ssize_t edu_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    rt_uint32_t number = 0;
    struct edu_device *edu = raw_to_edu_device(dev);

    rt_memcpy(&number, buffer, rt_min(sizeof(number), size));

    rt_mutex_take(&edu->lock, RT_WAITING_FOREVER);

    edu->ack = EDU_FACTORIAL_ACK;
    edu_writel(edu, EDU_REG_STATUS, EDU_REG_STATUS_IRQ);
    edu_writel(edu, EDU_REG_VALUE, number);

    rt_completion_wait(&edu->done, RT_WAITING_FOREVER);

    rt_mutex_release(&edu->lock);

    return rt_min(sizeof(number), size);
}

#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops edu_ops =
{
    .read = edu_read,
    .write = edu_write,
};
#endif
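
/* Interrupt handler: acknowledge the expected source and wake up the waiter. */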
static void edu_isr(int irqno, void *param)
{
    struct edu_device *edu = param;

    if (edu_readl(edu, EDU_REG_IRQ_STATUS) & (EDU_REG_ISR_FACT | EDU_REG_ISR_DMA))
    {
        edu_writel(edu, EDU_REG_IRQ_ACK, edu->ack);

        rt_completion_done(&edu->done);
    }
}
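
/*
 * Probe: map BAR 0, register a memory-to-memory DMA controller and the
 * "edu" character device, then install the handler for the device's
 * (shared) legacy interrupt line.
 */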
static rt_err_t edu_probe(struct rt_pci_device *pdev)
{
    rt_err_t err;
    struct edu_device *edu = rt_calloc(1, sizeof(*edu));

    if (!edu)
    {
        return -RT_ENOMEM;
    }

    edu->regs = rt_pci_iomap(pdev, PCI_EDU_REGS_BAR);

    if (!edu->regs)
    {
        err = -RT_EIO;
        goto _fail;
    }

    edu->dma_ctrl.dev = &pdev->parent;
    edu->dma_ctrl.ops = &edu_dma_ops;
    rt_dma_controller_add_direction(&edu->dma_ctrl, RT_DMA_MEM_TO_MEM);
    /* Configured with the QEMU option: -device edu,dma_mask=0xffffffff */
    rt_dma_controller_set_addr_mask(&edu->dma_ctrl, RT_DMA_ADDR_MASK(32));

    if ((err = rt_dma_controller_register(&edu->dma_ctrl)))
    {
        goto _fail;
    }

    edu->parent.type = RT_Device_Class_Char;
#ifdef RT_USING_DEVICE_OPS
    edu->parent.ops = &edu_ops;
#else
    edu->parent.read = edu_read;
    edu->parent.write = edu_write;
#endif

    if ((err = rt_device_register(&edu->parent, "edu", RT_DEVICE_FLAG_RDWR)))
    {
        goto _free_dma;
    }

    pdev->parent.user_data = edu;

    rt_mutex_init(&edu->lock, "edu", RT_IPC_FLAG_PRIO);
    rt_completion_init(&edu->done);

    rt_hw_interrupt_install(pdev->irq, edu_isr, edu, "edu");
    rt_pci_irq_unmask(pdev);

    LOG_D("EDU PCI device v%d.%d", edu_readl(edu, EDU_REG_VERSION) >> 16,
            (edu_readl(edu, EDU_REG_VERSION) >> 8) & 0xff);

    return RT_EOK;

_free_dma:
    rt_dma_controller_unregister(&edu->dma_ctrl);

_fail:
    if (edu->regs)
    {
        rt_iounmap(edu->regs);
    }

    rt_free(edu);

    return err;
}
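
/* Undo probe in reverse order; the shared INTx line itself stays unmasked. */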
static rt_err_t edu_remove(struct rt_pci_device *pdev)
{
    struct edu_device *edu = pdev->parent.user_data;

    /* INTx is shared, don't mask all */
    rt_hw_interrupt_umask(pdev->irq);
    rt_pci_irq_mask(pdev);

    rt_dma_controller_unregister(&edu->dma_ctrl);
    rt_device_unregister(&edu->parent);

    rt_mutex_detach(&edu->lock);

    rt_iounmap(edu->regs);
    rt_free(edu);

    return RT_EOK;
}
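
/* Match QEMU's "edu" test device (PCI device ID 0x11e8). */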
static const struct rt_pci_device_id edu_ids[] =
{
    { RT_PCI_DEVICE_ID(PCI_VENDOR_ID_QEMU, 0x11e8), },
    { /* sentinel */ }
};

static struct rt_pci_driver edu_driver =
{
    .name = "edu",
    .ids = edu_ids,
    .probe = edu_probe,
    .remove = edu_remove,
};
RT_PCI_DRIVER_EXPORT(edu_driver);