/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-02-25     GuEe-GUI     the first version
 */

#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>

#define DBG_TAG "rtdm.dma"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

static rt_list_t dmac_nodes = RT_LIST_OBJECT_INIT(dmac_nodes);
static RT_DEFINE_SPINLOCK(dmac_nodes_lock);
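
/*
 * Take the controller mutex only in thread context: rt_thread_self() is
 * RT_NULL before the scheduler starts, so these helpers stay usable
 * during early initialization.
 */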
static void dma_lock(struct rt_dma_controller *ctrl)
{
    if (rt_thread_self())
    {
        rt_mutex_take(&ctrl->mutex, RT_WAITING_FOREVER);
    }
}

static void dma_unlock(struct rt_dma_controller *ctrl)
{
    if (rt_thread_self())
    {
        rt_mutex_release(&ctrl->mutex);
    }
}
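
/*
 * Register a DMA controller: validate its device, ops and direction
 * capabilities, add it to the global controller list, and bind it to its
 * OFW (device tree) node, if any, so consumers can find it through their
 * "dmas"/"dma-names" properties.
 */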
rt_err_t rt_dma_controller_register(struct rt_dma_controller *ctrl)
{
    const char *dev_name;
    char dma_name[RT_NAME_MAX];

    if (!ctrl || !ctrl->dev || !ctrl->ops)
    {
        return -RT_EINVAL;
    }

    dev_name = rt_dm_dev_get_name(ctrl->dev);

    if (rt_bitmap_next_set_bit(ctrl->dir_cap, 0, RT_DMA_DIR_MAX) == RT_DMA_DIR_MAX)
    {
        LOG_E("%s: No direction capability", dev_name);

        return -RT_EINVAL;
    }

    rt_snprintf(dma_name, sizeof(dma_name), "%s-dmac", dev_name);

    rt_list_init(&ctrl->list);

    rt_spin_lock(&dmac_nodes_lock);
    rt_list_insert_before(&dmac_nodes, &ctrl->list);
    rt_spin_unlock(&dmac_nodes_lock);

    rt_list_init(&ctrl->channels_nodes);
    rt_mutex_init(&ctrl->mutex, dma_name, RT_IPC_FLAG_PRIO);

    if (ctrl->dev->ofw_node)
    {
        rt_dm_dev_bind_fwdata(ctrl->dev, RT_NULL, ctrl);
    }

    return RT_EOK;
}
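
/*
 * Unregister a DMA controller; fails with -RT_EBUSY while any requested
 * channel is still attached to it.
 */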
rt_err_t rt_dma_controller_unregister(struct rt_dma_controller *ctrl)
{
    if (!ctrl)
    {
        return -RT_EINVAL;
    }

    dma_lock(ctrl);

    if (!rt_list_isempty(&ctrl->channels_nodes))
    {
        dma_unlock(ctrl);

        return -RT_EBUSY;
    }

    if (ctrl->dev->ofw_node)
    {
        rt_dm_dev_unbind_fwdata(ctrl->dev, RT_NULL);
    }

    dma_unlock(ctrl);

    rt_mutex_detach(&ctrl->mutex);

    rt_spin_lock(&dmac_nodes_lock);
    rt_list_remove(&ctrl->list);
    rt_spin_unlock(&dmac_nodes_lock);

    return RT_EOK;
}
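
/*
 * Start the transfer most recently prepared on this channel. An error
 * left behind by the prepare step is returned here instead of starting.
 */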
rt_err_t rt_dma_chan_start(struct rt_dma_chan *chan)
{
    rt_err_t err;
    struct rt_dma_controller *ctrl;

    if (!chan)
    {
        return -RT_EINVAL;
    }

    if (chan->prep_err)
    {
        LOG_D("%s: Not prepare done", rt_dm_dev_get_name(chan->slave));

        return chan->prep_err;
    }

    ctrl = chan->ctrl;

    dma_lock(ctrl);
    err = ctrl->ops->start(chan);
    dma_unlock(ctrl);

    return err;
}
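
/* Pause a channel; controllers without a pause op fall back to stop. */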
rt_err_t rt_dma_chan_pause(struct rt_dma_chan *chan)
{
    rt_err_t err;
    struct rt_dma_controller *ctrl;

    if (!chan)
    {
        return -RT_EINVAL;
    }

    if (!chan->ctrl->ops->pause)
    {
        LOG_D("%s: No pause, try stop", rt_dm_dev_get_name(chan->ctrl->dev));

        return rt_dma_chan_stop(chan);
    }

    if (chan->prep_err)
    {
        LOG_D("%s: Not prepare done", rt_dm_dev_get_name(chan->slave));

        return chan->prep_err;
    }

    ctrl = chan->ctrl;

    dma_lock(ctrl);
    err = ctrl->ops->pause(chan);
    dma_unlock(ctrl);

    return err;
}
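
/* Stop a channel; the stop op is mandatory (it is called unconditionally). */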
rt_err_t rt_dma_chan_stop(struct rt_dma_chan *chan)
{
    rt_err_t err;
    struct rt_dma_controller *ctrl;

    if (!chan)
    {
        return -RT_EINVAL;
    }

    if (chan->prep_err)
    {
        LOG_D("%s: Not prepare done", rt_dm_dev_get_name(chan->slave));

        return chan->prep_err;
    }

    ctrl = chan->ctrl;

    dma_lock(ctrl);
    err = ctrl->ops->stop(chan);
    dma_unlock(ctrl);

    return err;
}
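
/*
 * Validate and apply a slave configuration: the direction must be within
 * the controller's capabilities, the bus widths in range, and only named
 * (device tree) channels may use directions other than memory-to-memory.
 * The result is cached in chan->conf_err and checked by the prepare calls.
 */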
rt_err_t rt_dma_chan_config(struct rt_dma_chan *chan,
        struct rt_dma_slave_config *conf)
{
    rt_err_t err;
    struct rt_dma_controller *ctrl;
    enum rt_dma_transfer_direction dir;

    if (!chan || !conf)
    {
        /* Return directly: "chan" may be RT_NULL here, so the "_end" path
         * (which writes chan->conf_err) must not be taken. */
        return -RT_EINVAL;
    }

    dir = conf->direction;

    if (dir >= RT_DMA_DIR_MAX)
    {
        err = -RT_EINVAL;
        goto _end;
    }

    if (conf->src_addr_width >= RT_DMA_SLAVE_BUSWIDTH_BYTES_MAX ||
        conf->dst_addr_width >= RT_DMA_SLAVE_BUSWIDTH_BYTES_MAX)
    {
        err = -RT_EINVAL;
        goto _end;
    }

    ctrl = chan->ctrl;

    if (!rt_bitmap_test_bit(ctrl->dir_cap, dir))
    {
        err = -RT_ENOSYS;
        goto _end;
    }

    if (!chan->name && dir != RT_DMA_MEM_TO_MEM)
    {
        LOG_E("%s: illegal config for unnamed channels",
                rt_dm_dev_get_name(ctrl->dev));

        err = -RT_EINVAL;
        goto _end;
    }

    dma_lock(ctrl);
    err = ctrl->ops->config(chan, conf);
    dma_unlock(ctrl);

    if (!err)
    {
        rt_memcpy(&chan->conf, conf, sizeof(*conf));
    }

_end:
    chan->conf_err = err;

    return err;
}
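
/*
 * Completion hook for controller drivers: call it when a transfer (or one
 * cyclic period) finishes to invoke the consumer's callback.
 */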
rt_err_t rt_dma_chan_done(struct rt_dma_chan *chan, rt_size_t size)
{
    if (!chan)
    {
        return -RT_EINVAL;
    }

    if (chan->callback)
    {
        chan->callback(chan, size);
    }

    return RT_EOK;
}
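
/*
 * Returns RT_TRUE (and logs an error) when the transfer address lies
 * below the configured base address.
 */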
static rt_bool_t range_is_illegal(const char *name, const char *desc,
        rt_ubase_t addr0, rt_ubase_t addr1)
{
    rt_bool_t illegal = addr0 < addr1;

    if (illegal)
    {
        LOG_E("%s: %s %p is out of config %p", name, desc, addr0, addr1);
    }

    return illegal;
}
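
/*
 * Despite its name, returns RT_TRUE when the address does NOT fit the
 * controller's address mask, i.e. when it is unsupported.
 */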
static rt_bool_t addr_is_supported(const char *name, const char *desc,
        rt_uint64_t mask, rt_ubase_t addr)
{
    rt_bool_t illegal = !!(addr & ~mask);

    if (illegal)
    {
        LOG_E("%s: %s %p is out of mask %p", name, desc, addr, mask);
    }

    return illegal;
}
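
/*
 * Prepare a memory-to-memory copy; the channel must already be configured
 * for RT_DMA_MEM_TO_MEM and both addresses must be reachable by the
 * controller.
 */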
rt_err_t rt_dma_prep_memcpy(struct rt_dma_chan *chan,
        struct rt_dma_slave_transfer *transfer)
{
    rt_err_t err;
    rt_size_t len;
    rt_ubase_t dma_addr_src, dma_addr_dst;
    struct rt_dma_controller *ctrl;
    struct rt_dma_slave_config *conf;

    if (!chan || !transfer)
    {
        return -RT_EINVAL;
    }

    ctrl = chan->ctrl;
    conf = &chan->conf;

    if (chan->conf_err)
    {
        LOG_D("%s: Not config done", rt_dm_dev_get_name(chan->slave));

        return chan->conf_err;
    }

    RT_ASSERT(chan->conf.direction == RT_DMA_MEM_TO_MEM);

    dma_addr_src = transfer->src_addr;
    dma_addr_dst = transfer->dst_addr;
    len = transfer->buffer_len;

    if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "source",
            ctrl->addr_mask, conf->src_addr))
    {
        return -RT_ENOSYS;
    }

    if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "dest",
            ctrl->addr_mask, conf->dst_addr))
    {
        return -RT_ENOSYS;
    }

    if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
            dma_addr_src, conf->src_addr))
    {
        return -RT_EINVAL;
    }

    if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "dest",
            dma_addr_dst, conf->dst_addr))
    {
        return -RT_EINVAL;
    }

    if (ctrl->ops->prep_memcpy)
    {
        dma_lock(ctrl);
        err = ctrl->ops->prep_memcpy(chan, dma_addr_src, dma_addr_dst, len);
        dma_unlock(ctrl);
    }
    else
    {
        err = -RT_ENOSYS;
    }

    if (!err)
    {
        rt_memcpy(&chan->transfer, transfer, sizeof(*transfer));
    }

    chan->prep_err = err;

    return err;
}
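
/*
 * Prepare a cyclic transfer: buffer_len is consumed in period_len chunks,
 * with the controller expected to wrap around at the end of the buffer.
 */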
rt_err_t rt_dma_prep_cyclic(struct rt_dma_chan *chan,
        struct rt_dma_slave_transfer *transfer)
{
    rt_err_t err;
    rt_ubase_t dma_buf_addr;
    struct rt_dma_controller *ctrl;
    struct rt_dma_slave_config *conf;
    enum rt_dma_transfer_direction dir;

    if (!chan || !transfer)
    {
        return -RT_EINVAL;
    }

    ctrl = chan->ctrl;
    conf = &chan->conf;

    if (chan->conf_err)
    {
        LOG_D("%s: Not config done", rt_dm_dev_get_name(chan->slave));

        return chan->conf_err;
    }

    dir = chan->conf.direction;

    if (dir == RT_DMA_MEM_TO_DEV || dir == RT_DMA_MEM_TO_MEM)
    {
        dma_buf_addr = transfer->src_addr;

        if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "source",
                ctrl->addr_mask, conf->src_addr))
        {
            return -RT_ENOSYS;
        }

        if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
                dma_buf_addr, conf->src_addr))
        {
            return -RT_EINVAL;
        }
    }
    else if (dir == RT_DMA_DEV_TO_MEM)
    {
        dma_buf_addr = transfer->dst_addr;

        if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "dest",
                ctrl->addr_mask, conf->dst_addr))
        {
            return -RT_ENOSYS;
        }

        if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "dest",
                dma_buf_addr, conf->dst_addr))
        {
            return -RT_EINVAL;
        }
    }
    else
    {
        dma_buf_addr = ~0UL;
    }

    if (ctrl->ops->prep_cyclic)
    {
        dma_lock(ctrl);
        err = ctrl->ops->prep_cyclic(chan, dma_buf_addr,
                transfer->buffer_len, transfer->period_len, dir);
        dma_unlock(ctrl);
    }
    else
    {
        err = -RT_ENOSYS;
    }

    if (!err)
    {
        rt_memcpy(&chan->transfer, transfer, sizeof(*transfer));
    }

    chan->prep_err = err;

    return err;
}
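
/*
 * Prepare a single (one-shot) transfer of buffer_len bytes in the
 * configured direction.
 */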
rt_err_t rt_dma_prep_single(struct rt_dma_chan *chan,
        struct rt_dma_slave_transfer *transfer)
{
    rt_err_t err;
    rt_ubase_t dma_buf_addr;
    struct rt_dma_controller *ctrl;
    struct rt_dma_slave_config *conf;
    enum rt_dma_transfer_direction dir;

    if (!chan || !transfer)
    {
        return -RT_EINVAL;
    }

    ctrl = chan->ctrl;
    conf = &chan->conf;

    if (chan->conf_err)
    {
        LOG_D("%s: Not config done", rt_dm_dev_get_name(chan->slave));

        return chan->conf_err;
    }

    dir = chan->conf.direction;

    if (dir == RT_DMA_MEM_TO_DEV || dir == RT_DMA_MEM_TO_MEM)
    {
        dma_buf_addr = transfer->src_addr;

        if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "source",
                ctrl->addr_mask, conf->src_addr))
        {
            return -RT_ENOSYS;
        }

        if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "source",
                dma_buf_addr, conf->src_addr))
        {
            return -RT_EINVAL;
        }
    }
    else if (dir == RT_DMA_DEV_TO_MEM)
    {
        dma_buf_addr = transfer->dst_addr;

        if (addr_is_supported(rt_dm_dev_get_name(ctrl->dev), "dest",
                ctrl->addr_mask, conf->dst_addr))
        {
            return -RT_ENOSYS;
        }

        if (range_is_illegal(rt_dm_dev_get_name(ctrl->dev), "dest",
                dma_buf_addr, conf->dst_addr))
        {
            return -RT_EINVAL;
        }
    }
    else
    {
        dma_buf_addr = ~0UL;
    }

    if (ctrl->ops->prep_single)
    {
        dma_lock(ctrl);
        err = ctrl->ops->prep_single(chan, dma_buf_addr,
                transfer->buffer_len, dir);
        dma_unlock(ctrl);
    }
    else
    {
        err = -RT_ENOSYS;
    }

    if (!err)
    {
        rt_memcpy(&chan->transfer, transfer, sizeof(*transfer));
    }

    chan->prep_err = err;

    return err;
}
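
/*
 * Resolve a named channel to its controller through the consumer's
 * "dma-names"/"dmas" device tree properties, probing the controller's
 * platform driver on demand if it has no driver data bound yet.
 */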
static struct rt_dma_controller *ofw_find_dma_controller(struct rt_device *dev,
        const char *name, struct rt_ofw_cell_args *args)
{
    struct rt_dma_controller *ctrl = RT_NULL;
#ifdef RT_USING_OFW
    int index;
    struct rt_ofw_node *np = dev->ofw_node, *ctrl_np;

    if (!np)
    {
        return RT_NULL;
    }

    index = rt_ofw_prop_index_of_string(np, "dma-names", name);

    if (index < 0)
    {
        return RT_NULL;
    }

    if (!rt_ofw_parse_phandle_cells(np, "dmas", "#dma-cells", index, args))
    {
        ctrl_np = args->data;

        if (!rt_ofw_data(ctrl_np))
        {
            rt_platform_ofw_request(ctrl_np);
        }

        ctrl = rt_ofw_data(ctrl_np);
        rt_ofw_node_put(ctrl_np);
    }
#endif /* RT_USING_OFW */

    return ctrl;
}
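
/*
 * Request a channel for a slave device. With a name, the channel is
 * resolved through the device tree; without one, the first registered
 * controller capable of memory-to-memory transfers is used.
 */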
struct rt_dma_chan *rt_dma_chan_request(struct rt_device *dev, const char *name)
{
    void *fw_data = RT_NULL;
    struct rt_dma_chan *chan;
    struct rt_ofw_cell_args dma_args;
    struct rt_dma_controller *ctrl = RT_NULL;

    if (!dev)
    {
        return rt_err_ptr(-RT_EINVAL);
    }

    if (name)
    {
        fw_data = &dma_args;
        ctrl = ofw_find_dma_controller(dev, name, &dma_args);
    }
    else
    {
        struct rt_dma_controller *ctrl_tmp;

        rt_spin_lock(&dmac_nodes_lock);

        rt_list_for_each_entry(ctrl_tmp, &dmac_nodes, list)
        {
            /* Only memory to memory for unnamed requests */
            if (rt_bitmap_test_bit(ctrl_tmp->dir_cap, RT_DMA_MEM_TO_MEM))
            {
                ctrl = ctrl_tmp;
                break;
            }
        }

        rt_spin_unlock(&dmac_nodes_lock);
    }

    if (rt_is_err_or_null(ctrl))
    {
        /* Forward the error pointer (cast for the different return type),
         * or report "not supported" when no controller was found. */
        return ctrl ? (void *)ctrl : rt_err_ptr(-RT_ENOSYS);
    }

    if (ctrl->ops->request_chan)
    {
        chan = ctrl->ops->request_chan(ctrl, dev, fw_data);
    }
    else
    {
        chan = rt_calloc(1, sizeof(*chan));

        if (!chan)
        {
            chan = rt_err_ptr(-RT_ENOMEM);
        }
    }

    if (rt_is_err(chan))
    {
        return chan;
    }

    if (!chan)
    {
        LOG_E("%s: request_chan returned no channel and no error",
                rt_dm_dev_get_name(ctrl->dev));

        return rt_err_ptr(-RT_ERROR);
    }

    chan->name = name;
    chan->ctrl = ctrl;
    chan->slave = dev;
    rt_list_init(&chan->list);
    chan->conf_err = -RT_ERROR;
    chan->prep_err = -RT_ERROR;

    dma_lock(ctrl);
    rt_list_insert_before(&ctrl->channels_nodes, &chan->list);
    dma_unlock(ctrl);

    return chan;
}
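
/* Release a channel obtained from rt_dma_chan_request(). */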
rt_err_t rt_dma_chan_release(struct rt_dma_chan *chan)
{
    rt_err_t err = RT_EOK;

    if (!chan)
    {
        return -RT_EINVAL;
    }

    rt_mutex_take(&chan->ctrl->mutex, RT_WAITING_FOREVER);
    rt_list_remove(&chan->list);
    rt_mutex_release(&chan->ctrl->mutex);

    if (chan->ctrl->ops->release_chan)
    {
        err = chan->ctrl->ops->release_chan(chan);
    }
    else
    {
        rt_free(chan);
    }

    return err;
}
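
/*
 * A minimal usage sketch of this API for a memory-to-memory copy. The
 * "slave_dev" device and the DMA-reachable "src_phys"/"dst_phys" buffers
 * of "size" bytes are hypothetical, and error handling is elided:
 *
 *     struct rt_dma_slave_config conf = { 0 };
 *     struct rt_dma_slave_transfer xfer = { 0 };
 *     struct rt_dma_chan *chan;
 *
 *     chan = rt_dma_chan_request(slave_dev, RT_NULL);
 *
 *     conf.direction = RT_DMA_MEM_TO_MEM;
 *     conf.src_addr = src_phys;
 *     conf.dst_addr = dst_phys;
 *     rt_dma_chan_config(chan, &conf);
 *
 *     xfer.src_addr = src_phys;
 *     xfer.dst_addr = dst_phys;
 *     xfer.buffer_len = size;
 *     rt_dma_prep_memcpy(chan, &xfer);
 *
 *     rt_dma_chan_start(chan);
 *     // ... wait for the completion callback, then ...
 *     rt_dma_chan_release(chan);
 */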