/*
 * Copyright (c) 2019-2025 Allwinner Technology Co., Ltd. ALL rights reserved.
 *
 * Allwinner is a trademark of Allwinner Technology Co., Ltd., registered in
 * the People's Republic of China and other countries.
 * All Allwinner Technology Co., Ltd. trademarks are used with permission.
 *
 * DISCLAIMER
 * THIRD PARTY LICENCES MAY BE REQUIRED TO IMPLEMENT THE SOLUTION/PRODUCT.
 * IF YOU NEED TO INTEGRATE THIRD PARTY'S TECHNOLOGY (SONY, DTS, DOLBY, AVS OR MPEGLA, ETC.)
 * IN ALLWINNER'S SDK OR PRODUCTS, YOU SHALL BE SOLELY RESPONSIBLE TO OBTAIN
 * ALL APPROPRIATELY REQUIRED THIRD PARTY LICENCES.
 * ALLWINNER SHALL HAVE NO WARRANTY, INDEMNITY OR OTHER OBLIGATIONS WITH RESPECT TO MATTERS
 * COVERED UNDER ANY REQUIRED THIRD PARTY LICENSE.
 * YOU ARE SOLELY RESPONSIBLE FOR YOUR USAGE OF THIRD PARTY'S TECHNOLOGY.
 *
 *
 * THIS SOFTWARE IS PROVIDED BY ALLWINNER "AS IS" AND TO THE MAXIMUM EXTENT
 * PERMITTED BY LAW, ALLWINNER EXPRESSLY DISCLAIMS ALL WARRANTIES OF ANY KIND,
 * WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING WITHOUT LIMITATION REGARDING
 * THE TITLE, NON-INFRINGEMENT, ACCURACY, CONDITION, COMPLETENESS, PERFORMANCE
 * OR MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 * IN NO EVENT SHALL ALLWINNER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS, OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <hal_reset.h>
#include <hal_cache.h>
#include <hal_mem.h>
#include <hal_atomic.h>
#include <hal_clk.h>
#include <hal_interrupt.h>
#include <interrupt.h>
#include <sunxi_hal_common.h>
#include <hal_dma.h>

#define DMA_ERR(fmt, arg...) printf("%s()%d " fmt, __func__, __LINE__, ##arg)

static struct sunxi_dma_chan dma_chan_source[NR_MAX_CHAN];
static hal_spinlock_t dma_lock;

/*
 * Fix sconfig's bus width according to at_dmac:
 * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2, 8 bytes -> 3.
 */
static inline uint32_t convert_buswidth(enum dma_slave_buswidth addr_width)
{
    if (addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES)
    {
        return 0;
    }

    switch (addr_width)
    {
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
            return 1;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
            return 2;
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
            return 3;
        default:
            /* For 1 byte width or fallback */
            return 0;
    }
}

static inline void convert_burst(uint32_t *maxburst)
{
    switch (*maxburst)
    {
        case 1:
            *maxburst = 0;
            break;
        case 4:
            *maxburst = 1;
            break;
        case 8:
            *maxburst = 2;
            break;
        case 16:
            *maxburst = 3;
            break;
        default:
            printf("unknown maxburst\n");
            *maxburst = 0;
            break;
    }
}

static inline void sunxi_cfg_lli(struct sunxi_dma_lli *lli, uint32_t src_addr,
                                 uint32_t dst_addr, uint32_t len,
                                 struct dma_slave_config *config)
{
    uint32_t src_width = 0, dst_width = 0;

    /* Both pointers are dereferenced below, so reject either being NULL. */
    if (NULL == lli || NULL == config)
    {
        return;
    }

    src_width = convert_buswidth(config->src_addr_width);
    dst_width = convert_buswidth(config->dst_addr_width);
    lli->cfg = SRC_BURST(config->src_maxburst)
               | SRC_WIDTH(src_width)
               | DST_BURST(config->dst_maxburst)
               | DST_WIDTH(dst_width);
    lli->src = src_addr;
    lli->dst = dst_addr;
    lli->len = len;
    lli->para = NORMAL_WAIT;
}

static void sunxi_dump_lli(struct sunxi_dma_chan *chan, struct sunxi_dma_lli *lli)
{
#ifdef DMA_DEBUG
    printf("channum:%x\n"
           "\t\tdesc:desc - 0x%08x desc p - 0x%08x desc v - 0x%08x\n"
           "\t\tlli: v- 0x%08x v_lln - 0x%08x s - 0x%08x d - 0x%08x\n"
           "\t\tlen - 0x%08x para - 0x%08x p_lln - 0x%08x\n",
           chan->chan_count,
           (uint32_t)chan->desc, (uint32_t)chan->desc->p_lln, (uint32_t)chan->desc->vlln,
           (uint32_t)lli, (uint32_t)lli->vlln, (uint32_t)lli->src,
           (uint32_t)lli->dst, (uint32_t)lli->len, (uint32_t)lli->para, (uint32_t)lli->p_lln);
#endif
}

static void sunxi_dump_com_regs(void)
{
#ifdef DMA_DEBUG
    printf("Common register:\n"
           "\tmask0: 0x%08x\n"
           "\tmask1: 0x%08x\n"
           "\tpend0: 0x%08x\n"
           "\tpend1: 0x%08x\n"
#ifdef DMA_SECURE
           "\tsecur: 0x%08x\n"
#endif
#ifdef DMA_GATE
           "\t_gate: 0x%08x\n"
#endif
           "\tstats: 0x%08x\n",
           (uint32_t)hal_readl(DMA_IRQ_EN(0)),
           (uint32_t)hal_readl(DMA_IRQ_EN(1)),
           (uint32_t)hal_readl(DMA_IRQ_STAT(0)),
           (uint32_t)hal_readl(DMA_IRQ_STAT(1)),
#ifdef DMA_SECURE
           (uint32_t)hal_readl(DMA_SECURE),
#endif
#ifdef DMA_GATE
           (uint32_t)hal_readl(DMA_GATE),
#endif
           (uint32_t)hal_readl(DMA_STAT));
#endif
}

static inline void sunxi_dump_chan_regs(struct sunxi_dma_chan *ch)
{
#ifdef DMA_DEBUG
    u32 chan_num = ch->chan_count;

    printf("Chan %d reg:\n"
           "\t___en: \t0x%08x\n"
           "\tpause: \t0x%08x\n"
           "\tstart: \t0x%08x\n"
           "\t__cfg: \t0x%08x\n"
           "\t__src: \t0x%08x\n"
           "\t__dst: \t0x%08x\n"
           "\tcount: \t0x%08x\n"
           "\t_para: \t0x%08x\n\n",
           chan_num,
           (uint32_t)hal_readl(DMA_ENABLE(chan_num)),
           (uint32_t)hal_readl(DMA_PAUSE(chan_num)),
           (uint32_t)hal_readl(DMA_LLI_ADDR(chan_num)),
           (uint32_t)hal_readl(DMA_CFG(chan_num)),
           (uint32_t)hal_readl(DMA_CUR_SRC(chan_num)),
           (uint32_t)hal_readl(DMA_CUR_DST(chan_num)),
           (uint32_t)hal_readl(DMA_CNT(chan_num)),
           (uint32_t)hal_readl(DMA_PARA(chan_num)));
#endif
}

static void *sunxi_lli_list(struct sunxi_dma_lli *prev, struct sunxi_dma_lli *next,
                            struct sunxi_dma_chan *chan)
{
    if ((!prev && !chan) || !next)
    {
        return NULL;
    }

    if (!prev)
    {
        chan->desc = next;
        chan->desc->p_lln = __va_to_pa((unsigned long)next);
        chan->desc->vlln = next;
    }
    else
    {
        prev->p_lln = __va_to_pa((unsigned long)next);
        prev->vlln = next;
    }
    next->p_lln = LINK_END;
    next->vlln = NULL;

    return next;
}

static irqreturn_t sunxi_dma_irq_handle(int irq, void *ptr)
{
    uint32_t status_l = 0, status_h = 0;
    int i = 0;

    status_l = hal_readl(DMA_IRQ_STAT(0));
#if NR_MAX_CHAN + START_CHAN_OFFSET > HIGH_CHAN
    status_h = hal_readl(DMA_IRQ_STAT(1));
#endif
    hal_writel(status_l, DMA_IRQ_STAT(0));
#if NR_MAX_CHAN + START_CHAN_OFFSET > HIGH_CHAN
    hal_writel(status_h, DMA_IRQ_STAT(1));
#endif

    for (i = 0; i < NR_MAX_CHAN; i++)
    {
        uint32_t __cpsr;
        uint32_t chan_num;
        uint32_t status = 0;
        struct sunxi_dma_chan *chan = &dma_chan_source[i];

        __cpsr = hal_spin_lock_irqsave(&dma_lock);
        chan_num = chan->chan_count;
        if (chan->used == 0)
        {
            hal_spin_unlock_irqrestore(&dma_lock, __cpsr);
            continue;
        }
        status = (chan_num + START_CHAN_OFFSET >= HIGH_CHAN)
                 ? (status_h >> ((chan_num + START_CHAN_OFFSET - HIGH_CHAN) << 2))
                 : (status_l >> ((chan_num + START_CHAN_OFFSET) << 2));
        if (!(chan->irq_type & status))
        {
            goto unlock;
        }
        if (chan->cyclic)
        {
            dma_callback cb = NULL;
            void *cb_data = NULL;

            chan->periods_pos++;
            if (chan->periods_pos * chan->desc->len >= chan->buf_len)
            {
                chan->periods_pos = 0;
            }
            cb = chan->callback;
            cb_data = chan->callback_param;
            /* Drop the lock while the callback runs so it may call back
             * into the HAL. */
            hal_spin_unlock_irqrestore(&dma_lock, __cpsr);
            if (cb)
            {
                cb(cb_data);
            }
            __cpsr = hal_spin_lock_irqsave(&dma_lock);
        }
        else
        {
            dma_callback cb = NULL;
            void *cb_data = NULL;

            cb = chan->callback;
            cb_data = chan->callback_param;
            hal_spin_unlock_irqrestore(&dma_lock, __cpsr);
            if (cb)
            {
                cb(cb_data);
            }
            __cpsr = hal_spin_lock_irqsave(&dma_lock);
        }
unlock:
        hal_spin_unlock_irqrestore(&dma_lock, __cpsr);
    }

    return 0;
}

static int sunxi_dma_clk_init(bool enable)
{
    hal_clk_status_t ret;
    hal_reset_type_t reset_type = HAL_SUNXI_RESET;
    u32 reset_id;
    hal_clk_type_t clk_type = HAL_SUNXI_CCU;
    hal_clk_id_t clk_id;
    hal_clk_t clk;
    struct reset_control *reset;

    clk_id = SUNXI_CLK_DMA;
    reset_id = SUNXI_RST_DMA;

    if (enable)
    {
        reset = hal_reset_control_get(reset_type, reset_id);
        hal_reset_control_deassert(reset);
        hal_reset_control_put(reset);
        hal_clock_enable(hal_clock_get(clk_type, SUNXI_CLK_MBUS_DMA));
        clk = hal_clock_get(clk_type, clk_id);
        ret = hal_clock_enable(clk);
        if (ret != HAL_CLK_STATUS_OK)
        {
            DMA_ERR("DMA clock enable failed.\n");
        }
    }
    else
    {
        clk = hal_clock_get(clk_type, clk_id);
        ret = hal_clock_disable(clk);
        if (ret != HAL_CLK_STATUS_OK)
        {
            DMA_ERR("DMA clock disable failed.\n");
        }
        hal_clock_disable(hal_clock_get(clk_type, SUNXI_CLK_MBUS_DMA));
        hal_clock_put(clk);
    }

    return ret;
}

/* Free the channel's LLI descriptor chain and clear its callback. */
void sunxi_dma_free_ill(struct sunxi_dma_chan *chan)
{
    struct sunxi_dma_lli *li_adr = NULL, *next = NULL;

    if (NULL == chan)
    {
        DMA_ERR("[dma] chan is NULL\n");
        return;
    }

    li_adr = chan->desc;
    chan->desc = NULL;
    while (li_adr)
    {
        next = li_adr->vlln;
        dma_free_coherent(li_adr);
        li_adr = next;
    }
    chan->callback = NULL;
    chan->callback_param = NULL;
}

hal_dma_chan_status_t hal_dma_chan_request(struct sunxi_dma_chan **dma_chan)
{
    int i = 0;
    struct sunxi_dma_chan *chan;
    uint32_t __cpsr;

    for (i = 0; i < NR_MAX_CHAN; i++)
    {
        __cpsr = hal_spin_lock_irqsave(&dma_lock);
        chan = &dma_chan_source[i];
        if (chan->used == 0)
        {
            chan->used = 1;
            chan->chan_count = i;
            hal_spin_unlock_irqrestore(&dma_lock, __cpsr);
            *dma_chan = &dma_chan_source[i];
            return HAL_DMA_CHAN_STATUS_FREE;
        }
        hal_spin_unlock_irqrestore(&dma_lock, __cpsr);
    }

    return HAL_DMA_CHAN_STATUS_BUSY;
}

hal_dma_status_t hal_dma_prep_memcpy(struct sunxi_dma_chan *chan,
                                     uint32_t dest, uint32_t src, uint32_t len)
{
    struct sunxi_dma_lli *l_item = NULL;
    struct dma_slave_config *config = NULL;
    uint32_t __cpsr;

    if ((NULL == chan) || (dest == 0 || src == 0))
    {
        DMA_ERR("[dma] invalid parameter: chan, dest or src is NULL\n");
        return HAL_DMA_STATUS_INVALID_PARAMETER;
    }

    __cpsr = hal_spin_lock_irqsave(&dma_lock);
    l_item = (struct sunxi_dma_lli *)dma_alloc_coherent(sizeof(struct sunxi_dma_lli));
    if (!l_item)
    {
        hal_spin_unlock_irqrestore(&dma_lock, __cpsr);
        return HAL_DMA_STATUS_NO_MEM;
    }
    memset(l_item, 0, sizeof(struct sunxi_dma_lli));

    config = &chan->cfg;
    dest = __va_to_pa(dest);
    src = __va_to_pa(src);
    sunxi_cfg_lli(l_item, src, dest, len, config);
    l_item->cfg |= SRC_DRQ(DRQSRC_SDRAM)
                   | DST_DRQ(DRQDST_SDRAM)
                   | DST_LINEAR_MODE
                   | SRC_LINEAR_MODE;
    sunxi_lli_list(NULL, l_item, chan);
    sunxi_dump_lli(chan, l_item);
    hal_spin_unlock_irqrestore(&dma_lock, __cpsr);

    return HAL_DMA_STATUS_OK;
}

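/*
 * Usage sketch (illustrative only, not part of the driver): a blocking
 * memory-to-memory copy built on the API above.  The DMA_USAGE_EXAMPLE
 * guard, buffer sizes and the busy-wait completion loop are assumptions
 * for this example; real code would normally wait on the completion
 * callback instead.  hal_dcache_invalidate() is assumed to be provided
 * by hal_cache.h alongside hal_dcache_clean(), and the public prototypes
 * come from hal_dma.h.
 */
#ifdef DMA_USAGE_EXAMPLE
static int dma_memcpy_example(void)
{
    static char src_buf[512], dst_buf[512];    /* example buffers */
    struct sunxi_dma_chan *chan = NULL;
    uint32_t left = 0;
    int ret = -1;

    if (hal_dma_chan_request(&chan) != HAL_DMA_CHAN_STATUS_FREE)
    {
        return -1;
    }
    /* Make the source visible to the controller; hal_dma_prep_memcpy()
     * converts both virtual addresses to physical internally. */
    hal_dcache_clean((unsigned long)src_buf, sizeof(src_buf));
    if (hal_dma_prep_memcpy(chan, (uint32_t)(uintptr_t)dst_buf,
                            (uint32_t)(uintptr_t)src_buf,
                            sizeof(src_buf)) != HAL_DMA_STATUS_OK)
    {
        goto out;
    }
    hal_dma_start(chan);
    while (hal_dma_tx_status(chan, &left) != DMA_COMPLETE)
    {
        /* busy-wait; 'left' holds the remaining byte count */
    }
    hal_dcache_invalidate((unsigned long)dst_buf, sizeof(dst_buf));
    hal_dma_stop(chan);
    ret = 0;
out:
    hal_dma_chan_free(chan);
    return ret;
}
#endif /* DMA_USAGE_EXAMPLE */
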
hal_dma_status_t hal_dma_prep_device(struct sunxi_dma_chan *chan,
                                     uint32_t dest, uint32_t src,
                                     uint32_t len, enum dma_transfer_direction dir)
{
    struct sunxi_dma_lli *l_item = NULL;
    struct dma_slave_config *config = NULL;
    uint32_t __cpsr;

    if ((NULL == chan) || (dest == 0 || src == 0))
    {
        DMA_ERR("[dma] invalid parameter: chan, dest or src is NULL\n");
        return HAL_DMA_STATUS_INVALID_PARAMETER;
    }

    l_item = (struct sunxi_dma_lli *)dma_alloc_coherent(sizeof(struct sunxi_dma_lli));
    if (!l_item)
    {
        return HAL_DMA_STATUS_NO_MEM;
    }
    memset(l_item, 0, sizeof(struct sunxi_dma_lli));

    __cpsr = hal_spin_lock_irqsave(&dma_lock);
    config = &chan->cfg;
    if (dir == DMA_MEM_TO_DEV)
    {
        src = __va_to_pa(src);
        sunxi_cfg_lli(l_item, src, dest, len, config);
        l_item->cfg |= GET_DST_DRQ(config->slave_id)
                       | SRC_LINEAR_MODE
                       | DST_IO_MODE
                       | SRC_DRQ(DRQSRC_SDRAM);
    }
    else if (dir == DMA_DEV_TO_MEM)
    {
        dest = __va_to_pa(dest);
        sunxi_cfg_lli(l_item, src, dest, len, config);
        /* The destination is SDRAM here, so use the destination port id. */
        l_item->cfg |= GET_SRC_DRQ(config->slave_id)
                       | DST_LINEAR_MODE
                       | SRC_IO_MODE
                       | DST_DRQ(DRQDST_SDRAM);
    }
    else if (dir == DMA_DEV_TO_DEV)
    {
        sunxi_cfg_lli(l_item, src, dest, len, config);
        l_item->cfg |= GET_SRC_DRQ(config->slave_id)
                       | DST_IO_MODE
                       | SRC_IO_MODE
                       | GET_DST_DRQ(config->slave_id);
    }
    sunxi_lli_list(NULL, l_item, chan);
    sunxi_dump_lli(chan, l_item);
    hal_spin_unlock_irqrestore(&dma_lock, __cpsr);

    return HAL_DMA_STATUS_OK;
}

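/*
 * Sketch of a one-shot memory-to-device transmit using hal_dma_prep_device()
 * (illustrative, not part of the driver).  The FIFO physical address and DRQ
 * port number are placeholders supplied by the caller; the bus widths and
 * raw burst lengths below are example choices, and hal_dma_slave_config()
 * converts the bursts to register encodings.
 */
#ifdef DMA_USAGE_EXAMPLE
static int dma_dev_tx_example(struct sunxi_dma_chan *chan,
                              const void *buf, uint32_t len,
                              uint32_t fifo_pa, uint32_t drq_port)
{
    struct dma_slave_config cfg = {0};

    cfg.dst_addr       = fifo_pa;                    /* device FIFO, physical */
    cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    cfg.src_maxburst   = 8;                          /* raw burst length */
    cfg.dst_maxburst   = 8;
    cfg.slave_id       = drq_port;                   /* placeholder DRQ id */
    if (hal_dma_slave_config(chan, &cfg) != HAL_DMA_STATUS_OK)
    {
        return -1;
    }
    /* Flush the payload before handing it to the controller. */
    hal_dcache_clean((unsigned long)buf, len);
    if (hal_dma_prep_device(chan, fifo_pa, (uint32_t)(uintptr_t)buf,
                            len, DMA_MEM_TO_DEV) != HAL_DMA_STATUS_OK)
    {
        return -1;
    }
    return (hal_dma_start(chan) == HAL_DMA_STATUS_OK) ? 0 : -1;
}
#endif /* DMA_USAGE_EXAMPLE */
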
hal_dma_status_t hal_dma_prep_cyclic(struct sunxi_dma_chan *chan,
                                     uint32_t buf_addr, uint32_t buf_len,
                                     uint32_t period_len, enum dma_transfer_direction dir)
{
    struct sunxi_dma_lli *l_item = NULL, *prev = NULL;
    struct dma_slave_config *config = NULL;
    uint32_t periods = 0;
    uint32_t i = 0;
    uint32_t __cpsr;

    if ((NULL == chan) || (0 == buf_addr))
    {
        DMA_ERR("[dma] chan or buf_addr is NULL\n");
        return HAL_DMA_STATUS_INVALID_PARAMETER;
    }
    /* Check period_len before dividing by it, and reject a buffer that
     * cannot hold even one period. */
    if (0 == period_len || buf_len < period_len)
    {
        DMA_ERR("[dma] invalid period_len\n");
        return HAL_DMA_STATUS_INVALID_PARAMETER;
    }
    periods = buf_len / period_len;

    __cpsr = hal_spin_lock_irqsave(&dma_lock);
    /* Free any descriptor chain left over from a previous transfer. */
    if (chan->desc)
    {
        struct sunxi_dma_lli *li_adr = NULL, *next = NULL;

        li_adr = chan->desc;
        chan->desc = NULL;
        while (li_adr)
        {
            next = li_adr->vlln;
            dma_free_coherent(li_adr);
            li_adr = next;
        }
    }
    config = &chan->cfg;
    for (i = 0; i < periods; i++)
    {
        /* Allocate outside the spinlock. */
        hal_spin_unlock_irqrestore(&dma_lock, __cpsr);
        l_item = (struct sunxi_dma_lli *)dma_alloc_coherent(sizeof(struct sunxi_dma_lli));
        if (!l_item)
        {
            return HAL_DMA_STATUS_NO_MEM;
        }
        __cpsr = hal_spin_lock_irqsave(&dma_lock);
        memset(l_item, 0, sizeof(struct sunxi_dma_lli));
        if (dir == DMA_MEM_TO_DEV)
        {
            sunxi_cfg_lli(l_item, __va_to_pa(buf_addr + period_len * i),
                          config->dst_addr, period_len, config);
            l_item->cfg |= GET_DST_DRQ(config->slave_id)
                           | SRC_LINEAR_MODE
                           | DST_IO_MODE
                           | SRC_DRQ(DRQSRC_SDRAM);
        }
        else if (dir == DMA_DEV_TO_MEM)
        {
            sunxi_cfg_lli(l_item, config->src_addr,
                          __va_to_pa(buf_addr + period_len * i),
                          period_len, config);
            l_item->cfg |= GET_SRC_DRQ(config->slave_id)
                           | DST_LINEAR_MODE
                           | SRC_IO_MODE
                           | DST_DRQ(DRQDST_SDRAM);
        }
        else if (dir == DMA_DEV_TO_DEV)
        {
            sunxi_cfg_lli(l_item, config->src_addr,
                          config->dst_addr, period_len, config);
            l_item->cfg |= GET_SRC_DRQ(config->slave_id)
                           | DST_IO_MODE
                           | SRC_IO_MODE
                           | GET_DST_DRQ(config->slave_id);
        }
        prev = sunxi_lli_list(prev, l_item, chan);
    }
    /* Close the ring: the last descriptor links back to the first. */
    prev->p_lln = __va_to_pa((unsigned long)chan->desc);
    chan->cyclic = true;
    chan->buf_len = buf_len;   /* used by the IRQ handler to wrap periods_pos */
    chan->periods_pos = 0;
#ifdef DMA_DEBUG
    for (prev = chan->desc; prev != NULL; prev = prev->vlln)
    {
        sunxi_dump_lli(chan, prev);
    }
#endif
    hal_spin_unlock_irqrestore(&dma_lock, __cpsr);

    return HAL_DMA_STATUS_OK;
}

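/*
 * Cyclic usage sketch (illustrative, with assumptions noted inline): stream
 * a ring buffer to a peripheral, taking one interrupt per period.  The
 * period count and size are example values, and the channel is assumed to be
 * already requested and slave-configured as in the dma_dev_tx_example()
 * sketch above.
 */
#ifdef DMA_USAGE_EXAMPLE
#define EXAMPLE_PERIODS    4
#define EXAMPLE_PERIOD_LEN 256

static void dma_period_done(void *param)
{
    /* Called from sunxi_dma_irq_handle() with dma_lock released; refill
     * the period that just drained here. */
    (void)param;
}

static int dma_cyclic_example(struct sunxi_dma_chan *chan)
{
    static char ring[EXAMPLE_PERIODS * EXAMPLE_PERIOD_LEN];

    hal_dma_callback_install(chan, dma_period_done, chan);
    hal_dcache_clean((unsigned long)ring, sizeof(ring));
    if (hal_dma_prep_cyclic(chan, (uint32_t)(uintptr_t)ring, sizeof(ring),
                            EXAMPLE_PERIOD_LEN, DMA_MEM_TO_DEV) != HAL_DMA_STATUS_OK)
    {
        return -1;
    }
    return (hal_dma_start(chan) == HAL_DMA_STATUS_OK) ? 0 : -1;
}
#endif /* DMA_USAGE_EXAMPLE */
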
hal_dma_status_t hal_dma_callback_install(struct sunxi_dma_chan *chan,
                                          dma_callback callback,
                                          void *callback_param)
{
    if (NULL == chan)
    {
        DMA_ERR("[dma] chan is NULL\n");
        return HAL_DMA_STATUS_INVALID_PARAMETER;
    }
    if (NULL == callback)
    {
        DMA_ERR("[dma] callback is NULL\n");
        return HAL_DMA_STATUS_INVALID_PARAMETER;
    }
    if (NULL == callback_param)
    {
        DMA_ERR("[dma] callback_param is NULL\n");
        return HAL_DMA_STATUS_INVALID_PARAMETER;
    }
    chan->callback = callback;
    chan->callback_param = callback_param;

    return HAL_DMA_STATUS_OK;
}

hal_dma_status_t hal_dma_slave_config(struct sunxi_dma_chan *chan,
                                      struct dma_slave_config *config)
{
    uint32_t __cpsr;

    if (NULL == config || NULL == chan)
    {
        DMA_ERR("[dma] dma config is NULL\n");
        return HAL_DMA_STATUS_INVALID_PARAMETER;
    }

    __cpsr = hal_spin_lock_irqsave(&dma_lock);
    /* Note: converts the caller's raw maxburst values (1/4/8/16) to register
     * encodings in place before caching the whole config in the channel. */
    convert_burst(&config->src_maxburst);
    convert_burst(&config->dst_maxburst);
    memcpy((void *)&(chan->cfg), (void *)config, sizeof(struct dma_slave_config));
    hal_spin_unlock_irqrestore(&dma_lock, __cpsr);

    return HAL_DMA_STATUS_OK;
}

enum dma_status hal_dma_tx_status(struct sunxi_dma_chan *chan, uint32_t *left_size)
{
    uint32_t i = 0;
    struct sunxi_dma_lli *l_item = NULL;
    enum dma_status status = DMA_INVALID_PARAMETER;
    uint32_t __cpsr;

    if (NULL == chan || NULL == left_size)
    {
        DMA_ERR("[dma] chan or left_size is NULL\n");
        return DMA_INVALID_PARAMETER;
    }

    __cpsr = hal_spin_lock_irqsave(&dma_lock);
    if (chan->cyclic)
    {
        /* Walk to the period the hardware is currently working on. */
        for (i = 0, l_item = chan->desc; i <= chan->periods_pos; i++, l_item = l_item->vlln)
        {
            if (NULL == l_item)
            {
                *left_size = 0;
                status = DMA_COMPLETE;
                goto unlock;
            }
        }
        if (NULL == l_item)
        {
            *left_size = 0;
            status = DMA_COMPLETE;
        }
        else
        {
            uint32_t pos = 0;
            bool count = false;

            pos = hal_readl(DMA_LLI_ADDR(chan->chan_count));
            *left_size = hal_readl(DMA_CNT(chan->chan_count));
            if (pos == LINK_END)
            {
                status = DMA_COMPLETE;
                goto unlock;
            }
            /* Add the lengths of the descriptors that follow the one the
             * hardware is currently fetching. */
            for (l_item = chan->desc; l_item != NULL; l_item = l_item->vlln)
            {
                if (l_item->p_lln == pos)
                {
                    count = true;
                    continue;
                }
                if (count)
                {
                    *left_size += l_item->len;
                }
            }
            status = DMA_IN_PROGRESS;
        }
    }
    else
    {
        *left_size = hal_readl(DMA_CNT(chan->chan_count));
        if (*left_size == 0)
        {
            status = DMA_COMPLETE;
        }
        else
        {
            status = DMA_IN_PROGRESS;
        }
    }
unlock:
    hal_spin_unlock_irqrestore(&dma_lock, __cpsr);
    return status;
}

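/*
 * Sketch: bounded polling for a one-shot transfer (the spin budget is an
 * arbitrary example value; this helper is not part of the driver).  For
 * cyclic transfers hal_dma_tx_status() instead reports the residue relative
 * to the current ring position.
 */
#ifdef DMA_USAGE_EXAMPLE
static int dma_wait_done_example(struct sunxi_dma_chan *chan, uint32_t spins)
{
    uint32_t left = 0;

    while (spins--)
    {
        if (hal_dma_tx_status(chan, &left) == DMA_COMPLETE)
        {
            return 0;
        }
    }
    return -1; /* timed out; 'left' holds the remaining byte count */
}
#endif /* DMA_USAGE_EXAMPLE */
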
hal_dma_status_t hal_dma_start(struct sunxi_dma_chan *chan)
{
    uint32_t high = 0;
    uint32_t irq_val = 0;
    struct sunxi_dma_lli *prev = NULL;
    uint32_t __cpsr;

    if (NULL == chan)
    {
        DMA_ERR("[dma] chan is NULL\n");
        return HAL_DMA_STATUS_INVALID_PARAMETER;
    }

    __cpsr = hal_spin_lock_irqsave(&dma_lock);
    if (chan->cyclic)
    {
        chan->irq_type = IRQ_PKG;
    }
    else
    {
        chan->irq_type = IRQ_QUEUE;
    }
    high = (chan->chan_count + START_CHAN_OFFSET >= HIGH_CHAN) ? 1 : 0;
    irq_val = hal_readl(DMA_IRQ_EN(high));
    irq_val |= SHIFT_IRQ_MASK(chan->irq_type, chan->chan_count);
    hal_writel(irq_val, DMA_IRQ_EN(high));
    SET_OP_MODE(chan->chan_count, SRC_HS_MASK | DST_HS_MASK);

    /* Flush the descriptor chain to memory before the controller fetches it.
     * Callers remain responsible for their data buffers. */
    for (prev = chan->desc; prev != NULL; prev = prev->vlln)
    {
        hal_dcache_clean((unsigned long)prev, sizeof(*prev));
        /* k_dcache_clean(prev, sizeof(*prev)); */
        /* k_dcache_clean(prev->src, prev->len); */
        /* k_dcache_clean_invalidate(prev->dst, prev->len); */
    }
    hal_writel(__va_to_pa((unsigned long)chan->desc), DMA_LLI_ADDR(chan->chan_count));
    hal_writel(CHAN_START, DMA_ENABLE(chan->chan_count));
    sunxi_dump_com_regs();
    sunxi_dump_chan_regs(chan);
    hal_spin_unlock_irqrestore(&dma_lock, __cpsr);

    return HAL_DMA_STATUS_OK;
}

hal_dma_status_t hal_dma_stop(struct sunxi_dma_chan *chan)
{
    uint32_t __cpsr;

    if (NULL == chan)
    {
        DMA_ERR("[dma] chan is NULL\n");
        return HAL_DMA_STATUS_INVALID_PARAMETER;
    }

    __cpsr = hal_spin_lock_irqsave(&dma_lock);
    /*
     * Enter the PAUSE state first so the byte count of data still in
     * flight on the bus is not lost when the channel is stopped.
     */
    hal_writel(CHAN_PAUSE, DMA_PAUSE(chan->chan_count));
    hal_writel(CHAN_STOP, DMA_ENABLE(chan->chan_count));
    hal_writel(CHAN_RESUME, DMA_PAUSE(chan->chan_count));
    if (chan->cyclic)
    {
        chan->cyclic = false;
    }
    hal_spin_unlock_irqrestore(&dma_lock, __cpsr);

    return HAL_DMA_STATUS_OK;
}

hal_dma_status_t hal_dma_chan_free(struct sunxi_dma_chan *chan)
{
    uint32_t high = 0;
    unsigned long irq_val = 0;
    uint32_t __cpsr;

    if (NULL == chan)
    {
        DMA_ERR("[dma] chan is NULL\n");
        return HAL_DMA_STATUS_INVALID_PARAMETER;
    }
    if (!chan->used)
    {
        return HAL_DMA_STATUS_INVALID_PARAMETER;
    }

    __cpsr = hal_spin_lock_irqsave(&dma_lock);
    high = (chan->chan_count + START_CHAN_OFFSET >= HIGH_CHAN) ? 1 : 0;
    irq_val = hal_readl(DMA_IRQ_EN(high));
    irq_val &= ~(SHIFT_IRQ_MASK(chan->irq_type, chan->chan_count));
    hal_writel(irq_val, DMA_IRQ_EN(high));
    chan->used = 0;
    hal_spin_unlock_irqrestore(&dma_lock, __cpsr);
    sunxi_dma_free_ill(chan);

    return HAL_DMA_STATUS_OK;
}

hal_dma_status_t hal_dma_chan_desc_free(struct sunxi_dma_chan *chan)
{
    /* FIXME: Interrupt context cannot release memory in melis OS. */
    if (hal_interrupt_get_nest() <= 0)
    {
        sunxi_dma_free_ill(chan);
        return HAL_DMA_STATUS_OK;
    }
    /* Freeing memory in interrupt context is not allowed. */
    return HAL_DMA_STATUS_ERR_PERM;
}

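/*
 * Teardown sketch (illustrative, not part of the driver): because descriptor
 * memory must not be freed from interrupt context (see the FIXME above), a
 * callback should only signal a task, which then stops and frees the channel.
 * hal_dma_chan_free() already releases the descriptor chain via
 * sunxi_dma_free_ill(), so a separate hal_dma_chan_desc_free() call is only
 * needed when the channel itself is being kept for reuse.
 */
#ifdef DMA_USAGE_EXAMPLE
static void dma_teardown_example(struct sunxi_dma_chan *chan)
{
    /* Runs in task (thread) context, never from the DMA callback. */
    hal_dma_stop(chan);       /* pause, stop, then clear the pause state */
    hal_dma_chan_free(chan);  /* mask its IRQ and free the descriptors */
}
#endif /* DMA_USAGE_EXAMPLE */
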
/* Only needs to be executed once. */
void hal_dma_init(void)
{
    uint32_t i = 0, high = 0;

    memset((void *)dma_chan_source, 0, NR_MAX_CHAN * sizeof(struct sunxi_dma_chan));
    for (i = 0; i < NR_MAX_CHAN; i++)
    {
        high = (i >= HIGH_CHAN) ? 1 : 0;
        /* Disable all DMA IRQs. */
        hal_writel(0, DMA_IRQ_EN(high));
        /* Clear all pending DMA IRQs. */
        hal_writel(0xffffffff, DMA_IRQ_STAT(high));
    }
    /* Disable auto gating. */
    hal_writel(DMA_MCLK_GATE | DMA_COMMON_GATE | DMA_CHAN_GATE, DMA_GATE);
    sunxi_dma_clk_init(true);
    /* Request the DMA controller IRQ. */
    if (request_irq(DMA_IRQ_NUM, sunxi_dma_irq_handle, 0, "dma-ctl", NULL) < 0)
    {
        DMA_ERR("[dma] request irq error\n");
    }
    enable_irq(DMA_IRQ_NUM);
}