/* Copyright (c) 2023, Canaan Bright Sight Co., Ltd
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2006-2025 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 */
  30. #include <rtthread.h>
  31. #include <rthw.h>
  32. #include <rtdevice.h>
  33. #include <rtdef.h>
  34. #include <rtatomic.h>
  35. #include <stdbool.h>
  36. #include <stdlib.h>
  37. #include <stdio.h>
  38. #include <riscv_io.h>
  39. #include <mmu.h>
  40. #include <cache.h>
  41. #include <page.h>
  42. #include "board.h"
  43. #include "ioremap.h"
  44. #include "drv_hardlock.h"
  45. #include "drv_pdma.h"
  46. #include <rtdbg.h>
  47. #define DBG_TAG "drv_pdma"
  48. #ifdef RT_DEBUG
  49. #define DBG_LVL DBG_LOG
  50. #else
  51. #define DBG_LVL DBG_WARNING
  52. #endif
  53. #define DBG_COLOR
  54. /**
  55. * @brief PDMA controller instance initialization
  56. */
  57. static pdma_controller_t pdma_ctrl = {0};
  58. #define PDMA_CH_MENUCONFIG_ENABLED(ch) \
  59. (((ch) >= 0 && (ch) < PDMA_CH_MAX) ? \
  60. (pdma_ctrl.chan[(ch)].menuconfig_enabled) : \
  61. (RT_FALSE))
  62. /**
  63. * @brief Acquire PDMA hardware lock
  64. * @note Busy-waits until lock is acquired
  65. */
  66. #define PDMA_LOCK() while (kd_hardlock_lock(pdma_ctrl.hardlock) != 0)
  67. /**
  68. * @brief Release PDMA hardware lock
  69. */
  70. #define PDMA_UNLOCK() kd_hardlock_unlock(pdma_ctrl.hardlock)
  71. /*--------------------- Channel Enable Control ---------------------*/
  72. /**
  73. * @brief Enable specific PDMA channel
  74. */
  75. #define PDMA_CH_ENABLE(ch) \
  76. (pdma_write32(&pdma_ctrl.reg->pdma_ch_en, pdma_read32(&pdma_ctrl.reg->pdma_ch_en) | (1U << (ch))))
  77. /**
  78. * @brief Disable specific PDMA channel
  79. */
  80. #define PDMA_CH_DISABLE(ch) \
  81. (pdma_write32(&pdma_ctrl.reg->pdma_ch_en, pdma_read32(&pdma_ctrl.reg->pdma_ch_en) & ~(1U << (ch))))
  82. /**
  83. * @brief Check if PDMA channel is enabled
  84. */
  85. #define PDMA_CH_IS_ENABLED(ch) \
  86. (pdma_read32(&pdma_ctrl.reg->pdma_ch_en) & (1U << (ch)))
  87. /*--------------------- Interrupt Control ---------------------*/
  88. /**
  89. * @brief Enable interrupts for specific PDMA channel
  90. */
  91. #define PDMA_CH_INT_ENABLE(ch, mask) \
  92. (pdma_write32(&pdma_ctrl.reg->dma_int_mask, pdma_read32(&pdma_ctrl.reg->dma_int_mask) & ~((mask) << (ch))))
  93. /**
  94. * @brief Disable interrupts for specific PDMA channel
  95. */
  96. #define PDMA_CH_INT_DISABLE(ch, mask) \
  97. (pdma_write32(&pdma_ctrl.reg->dma_int_mask, pdma_read32(&pdma_ctrl.reg->dma_int_mask) | ((mask) << (ch))))
  98. /**
  99. * @brief Disable all interrupts for specific PDMA channel
  100. */
  101. #define PDMA_CH_INT_DISABLE_ALL(ch) \
  102. PDMA_CH_INT_DISABLE(ch, PDMA_ALL_INTS)
  103. /**
  104. * @brief Clear interrupt status for specific PDMA channel
  105. */
  106. #define PDMA_CH_INT_CLEAR(ch, intr) \
  107. (pdma_write32(&pdma_ctrl.reg->dma_int_stat, (intr) << (ch)))
  108. /**
  109. * @brief Clear all interrupt status for specific PDMA channel
  110. */
  111. #define PDMA_CH_INT_CLEAR_ALL(ch) \
  112. PDMA_CH_INT_CLEAR(ch, PDMA_ALL_INTS)
  113. /**
  114. * @brief Check if interrupt is triggered for specific PDMA channel
  115. */
  116. #define PDMA_CH_INT_IS_TRIGGERED(ch, intr) \
  117. (pdma_read32(&pdma_ctrl.reg->dma_int_stat) & ((intr) << (ch)))
  118. /*--------------------- Status Check ---------------------*/
  119. /**
  120. * @brief Check if PDMA channel is busy
  121. */
  122. #define PDMA_CH_IS_BUSY(ch) \
  123. (pdma_read32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_status) & PDMA_STATE_BUSY)
  124. /**
  125. * @brief Check if PDMA channel is paused
  126. */
  127. #define PDMA_CH_IS_PAUSED(ch) \
  128. (pdma_read32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_status) & PDMA_STATE_PAUSE)
  129. /*--------------------- Data Transfer Control ---------------------*/
  130. /**
  131. * @brief Start PDMA transfer on specific channel
  132. */
  133. #define PDMA_CH_START(ch) \
  134. (pdma_write32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_ctl, PDMA_CMD_START))
  135. /**
  136. * @brief Stop PDMA transfer on specific channel
  137. */
  138. #define PDMA_CH_STOP(ch) \
  139. (pdma_write32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_ctl, PDMA_CMD_STOP))
  140. /**
  141. * @brief Resume paused PDMA transfer on specific channel
  142. */
  143. #define PDMA_CH_RESUME(ch) \
  144. (pdma_write32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_ctl, PDMA_CMD_RESUME))
  145. static void _k230_pdma_llt_free(rt_uint8_t ch);
  146. static rt_uint32_t *_k230_pdma_llt_cal(rt_uint8_t ch, usr_pdma_cfg_t *pdma_cfg);
  147. static rt_err_t _k230_pdma_safe_stop(rt_uint8_t ch, rt_uint32_t timeout_ms);
  148. /**
  149. * @brief Set callback function for specified PDMA channel
  150. * @param ch PDMA channel number
  151. * @param func Callback function pointer
  152. * @return RT_EOK on success, -RT_EINVAL on invalid parameters
  153. */
  154. rt_err_t k230_pdma_set_callback(rt_uint8_t ch, k230_pdma_callback_t func)
  155. {
  156. /* Validate channel and callback function */
  157. if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || func == RT_NULL)
  158. {
  159. return -RT_EINVAL;
  160. }
  161. /*
  162. * Safely set callback function by masking interrupts during update
  163. * This prevents potential race conditions with DMA interrupts
  164. */
  165. rt_hw_interrupt_mask(pdma_ctrl.chan[ch].irq_num);
  166. pdma_ctrl.chan[ch].cb.callback = func;
  167. rt_hw_interrupt_umask(pdma_ctrl.chan[ch].irq_num);
  168. return RT_EOK;
  169. }
  170. /**
  171. * @brief Request an available PDMA channel
  172. * @param ch [out] Pointer to store the allocated channel number
  173. * @return rt_err_t RT_EOK if success, error code otherwise
  174. */
  175. rt_err_t k230_pdma_request_channel(rt_uint8_t *ch)
  176. {
  177. if (ch == RT_NULL)
  178. {
  179. LOG_E("PDMA: Invalid channel pointer");
  180. return -RT_EINVAL;
  181. }
  182. rt_base_t level;
  183. level = rt_hw_interrupt_disable();
  184. PDMA_LOCK();
  185. for (rt_uint8_t i = 0; i < PDMA_CH_MAX; i++)
  186. {
  187. if (!PDMA_CH_MENUCONFIG_ENABLED(i))
  188. {
  189. LOG_D("PDMA: Channel %d not enabled in menuconfig", i);
  190. continue;
  191. }
  192. if (PDMA_CH_IS_ENABLED(i))
  193. {
  194. LOG_D("PDMA: Channel %d already enabled", i);
  195. continue;
  196. }
  197. PDMA_CH_ENABLE(i);
  198. LOG_D("PDMA: Trying channel %d", i);
  199. if (!PDMA_CH_IS_ENABLED(i))
  200. {
  201. LOG_W("PDMA: Channel %d failed to enable - possible hardware issue", i);
  202. continue;
  203. }
  204. if (PDMA_CH_IS_BUSY(i))
  205. {
  206. LOG_W("PDMA: Channel %d is busy, disabling", i);
  207. PDMA_CH_DISABLE(i);
  208. continue;
  209. }
  210. *ch = i;
  211. PDMA_CH_INT_DISABLE_ALL(i);
  212. PDMA_UNLOCK();
  213. rt_hw_interrupt_enable(level);
  214. pdma_ctrl.chan[i].cb.callback = RT_NULL;
  215. pdma_ctrl.chan[i].is_hw_configured = RT_FALSE;
  216. pdma_ctrl.chan[i].llt_va =RT_NULL;
  217. pdma_ctrl.chan[i].page_size = 0;
  218. rt_hw_interrupt_umask(pdma_ctrl.chan[i].irq_num);
  219. LOG_I("PDMA: Allocated channel %d", i);
  220. return RT_EOK;
  221. }
  222. *ch = PDMA_CH_MAX;
  223. PDMA_UNLOCK();
  224. rt_hw_interrupt_enable(level);
  225. LOG_E("PDMA: No available channel found");
  226. return -RT_EBUSY;
  227. }
  228. /**
  229. * @brief Release an allocated PDMA channel
  230. * @param ch Channel number to release
  231. * @return rt_err_t RT_EOK if success, error code otherwise
  232. */
  233. rt_err_t k230_pdma_release_channel(rt_uint8_t ch)
  234. {
  235. rt_base_t level;
  236. level = rt_hw_interrupt_disable();
  237. PDMA_LOCK();
  238. /* Validate channel configuration and status */
  239. if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || !PDMA_CH_IS_ENABLED(ch))
  240. {
  241. PDMA_UNLOCK();
  242. rt_hw_interrupt_enable(level);
  243. LOG_E("PDMA: Invalid channel %d to release", ch);
  244. return -RT_EINVAL;
  245. }
  246. PDMA_UNLOCK();
  247. rt_hw_interrupt_enable(level);
  248. rt_hw_interrupt_mask(pdma_ctrl.chan[ch].irq_num);
  249. /* Clear any registered callback */
  250. pdma_ctrl.chan[ch].cb.callback = RT_NULL;
  251. /* Safely stop DMA operation and release resources */
  252. rt_err_t err = _k230_pdma_safe_stop(ch, PDMA_MAX_WAIT_MS);
  253. if (err != RT_EOK)
  254. {
  255. LOG_E("PDMA: Failed to safely stop channel %d (err:%d)", ch, err);
  256. return err;
  257. }
  258. pdma_ctrl.chan[ch].is_hw_configured = RT_FALSE;
  259. /* Disable the channel */
  260. level = rt_hw_interrupt_disable();
  261. PDMA_LOCK();
  262. PDMA_CH_DISABLE(ch);
  263. PDMA_UNLOCK();
  264. rt_hw_interrupt_enable(level);
  265. LOG_I("PDMA: Channel %d released successfully", ch);
  266. return RT_EOK;
  267. }
  268. /**
  269. * @brief Start a PDMA channel operation
  270. * @param ch The channel number to start
  271. * @return RT_EOK on success, error code on failure
  272. */
  273. rt_err_t k230_pdma_start(rt_uint8_t ch)
  274. {
  275. rt_base_t level;
  276. level = rt_hw_interrupt_disable();
  277. PDMA_LOCK();
  278. LOG_D("Starting PDMA channel %d", ch);
  279. /* Basic channel validation */
  280. if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || !PDMA_CH_IS_ENABLED(ch))
  281. {
  282. LOG_E("Channel %d not enabled in menuconfig or not enabled", ch);
  283. PDMA_UNLOCK();
  284. rt_hw_interrupt_enable(level);
  285. return -RT_EINVAL;
  286. }
  287. /* Only start DMA if channel is properly configured to prevent unclosable channel */
  288. if (pdma_ctrl.chan[ch].is_hw_configured == RT_FALSE)
  289. {
  290. LOG_E("Channel %d not properly configured", ch);
  291. PDMA_UNLOCK();
  292. rt_hw_interrupt_enable(level);
  293. return -RT_ERROR;
  294. }
  295. /* Enable completion, pause and timeout interrupts */
  296. PDMA_CH_INT_ENABLE(ch, PDMA_PDONE_INT | PDMA_PPAUSE_INT | PDMA_PTOUT_INT);
  297. PDMA_UNLOCK();
  298. rt_hw_interrupt_enable(level);
  299. /* Start the channel operation */
  300. PDMA_CH_START(ch);
  301. LOG_I("Successfully started PDMA channel %d", ch);
  302. /* Clear configuration flag */
  303. pdma_ctrl.chan[ch].is_hw_configured == RT_FALSE;
  304. return RT_EOK;
  305. }
  306. /**
  307. * @brief Stop an active PDMA channel operation
  308. * @param ch The channel number to stop
  309. * @return RT_EOK on success, error code on failure
  310. */
  311. rt_err_t k230_pdma_stop(rt_uint8_t ch)
  312. {
  313. rt_base_t level;
  314. level = rt_hw_interrupt_disable();
  315. PDMA_LOCK();
  316. LOG_D("Attempting to stop PDMA channel %d", ch);
  317. /* Basic channel validation */
  318. if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || !PDMA_CH_IS_ENABLED(ch))
  319. {
  320. LOG_E("Channel %d not enabled in menuconfig or not enabled", ch);
  321. PDMA_UNLOCK();
  322. rt_hw_interrupt_enable(level);
  323. return -RT_EINVAL;
  324. }
  325. PDMA_UNLOCK();
  326. rt_hw_interrupt_enable(level);
  327. /* Safely stop the channel operation */
  328. rt_err_t ret = _k230_pdma_safe_stop(ch, PDMA_MAX_WAIT_MS);
  329. if (ret == RT_EOK)
  330. {
  331. LOG_I("Successfully stopped PDMA channel %d", ch);
  332. }
  333. else
  334. {
  335. LOG_E("Failed to stop PDMA channel %d (error: %d)", ch, ret);
  336. }
  337. return ret;
  338. }
  339. /**
  340. * @brief Convert PDMA channel configuration structure to register value
  341. * @param cfg Pointer to the channel configuration structure
  342. * @return 32-bit register value representing the configuration
  343. */
  344. static rt_uint32_t _k230_pdma_ch_cfg_to_reg(const pdma_ch_cfg_t *cfg)
  345. {
  346. rt_uint32_t reg = 0;
  347. /* Source type configuration */
  348. reg |= (cfg->ch_src_type & 0x1) << 0;
  349. /* Device horizontal size */
  350. reg |= (cfg->ch_dev_hsize & 0x3) << 1;
  351. /* Data endianness configuration */
  352. reg |= (cfg->ch_dat_endian & 0x3) << 4;
  353. /* Device burst length */
  354. reg |= (cfg->ch_dev_blen & 0xF) << 8;
  355. /* Channel priority */
  356. reg |= (cfg->ch_priority & 0xF) << 12;
  357. /* Device timeout */
  358. reg |= (cfg->ch_dev_tout & 0xFFF) << 16;
  359. return reg;
  360. }
  361. /**
  362. * @brief Configure PDMA channel with user settings
  363. * @param ch Channel number to configure
  364. * @param ucfg Pointer to user configuration structure
  365. * @return RT_EOK on success, error code on failure
  366. */
  367. static rt_err_t _k230_pdma_config(rt_uint8_t ch, usr_pdma_cfg_t *ucfg)
  368. {
  369. volatile rt_uint32_t *ch_cfg = (volatile rt_uint32_t*)(&(pdma_ctrl.reg->pdma_ch_reg[ch].ch_cfg));
  370. LOG_D("Configuring PDMA channel %d", ch);
  371. /* Convert configuration to register format */
  372. rt_uint32_t reg_val = _k230_pdma_ch_cfg_to_reg(&ucfg->pdma_ch_cfg);
  373. /* Write configuration to hardware registers */
  374. pdma_write32(ch_cfg, reg_val);
  375. pdma_write32(&(pdma_ctrl.reg->ch_peri_dev_sel[ch]), ucfg->device);
  376. LOG_I("PDMA channel %d configured successfully", ch);
  377. return RT_EOK;
  378. }
  379. /**
  380. * @brief Validate user configuration parameters
  381. * @param ucfg Pointer to user configuration structure
  382. * @return RT_EOK if valid, error code if invalid
  383. */
  384. static rt_err_t _k230_ucfg_check(usr_pdma_cfg_t *ucfg)
  385. {
  386. /* Parameter NULL check */
  387. if (ucfg == RT_NULL)
  388. {
  389. LOG_E("Configuration pointer is NULL");
  390. return -RT_EINVAL;
  391. }
  392. /* Device range validation */
  393. if ((ucfg->device > PDM_IN) || (ucfg->device < UART0_TX))
  394. {
  395. LOG_E("Invalid device selection: %d", ucfg->device);
  396. return -RT_EINVAL;
  397. }
  398. /* Validate peripheral data word width */
  399. if ((ucfg->pdma_ch_cfg.ch_dev_hsize > PSBYTE4) ||
  400. (ucfg->pdma_ch_cfg.ch_dev_hsize < PSBYTE1))
  401. {
  402. LOG_E("Invalid peripheral data width: %d (1-4 bytes supported)",
  403. ucfg->pdma_ch_cfg.ch_dev_hsize);
  404. return -RT_EINVAL;
  405. }
  406. /* Address and size alignment check */
  407. if (((rt_uintptr_t)ucfg->src_addr % 4) ||
  408. ((rt_uintptr_t)ucfg->dst_addr % 4) ||
  409. (ucfg->line_size % 4))
  410. {
  411. LOG_E("Alignment error - src: 0x%08X, dst: 0x%08X, size: %d",
  412. ucfg->src_addr, ucfg->dst_addr, ucfg->line_size);
  413. return -RT_EINVAL;
  414. }
  415. LOG_D("User configuration validation passed");
  416. return RT_EOK;
  417. }
  418. /**
  419. * @brief Configure a PDMA channel with user settings
  420. * @param ch Channel number to configure (0-PDMA_MAX_CHANNELS-1)
  421. * @param ucfg Pointer to user configuration structure
  422. * @return RT_EOK on success, error code on failure
  423. */
  424. rt_err_t k230_pdma_config(rt_uint8_t ch, usr_pdma_cfg_t *ucfg)
  425. {
  426. rt_err_t err;
  427. rt_base_t level;
  428. LOG_D("[CH%d] Starting PDMA configuration", ch);
  429. /* Enter critical section */
  430. level = rt_hw_interrupt_disable();
  431. PDMA_LOCK();
  432. /* Channel availability check */
  433. if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || !PDMA_CH_IS_ENABLED(ch))
  434. {
  435. LOG_E("[CH%d] Channel not enabled in menuconfig or hardware", ch);
  436. PDMA_UNLOCK();
  437. rt_hw_interrupt_enable(level);
  438. return -RT_EINVAL;
  439. }
  440. PDMA_UNLOCK();
  441. rt_hw_interrupt_enable(level);
  442. /* Validate user configuration */
  443. err = _k230_ucfg_check(ucfg);
  444. if (err != RT_EOK)
  445. {
  446. LOG_E("[CH%d] Configuration validation failed", ch);
  447. return err;
  448. }
  449. /* Safely stop channel if active */
  450. err = _k230_pdma_safe_stop(ch, PDMA_MAX_WAIT_MS);
  451. if (err != RT_EOK)
  452. {
  453. LOG_E("[CH%d] Failed to stop channel (err: %d)", ch, err);
  454. return err;
  455. }
  456. /* Apply hardware configuration */
  457. _k230_pdma_config(ch, ucfg);
  458. LOG_D("[CH%d] Hardware registers configured", ch);
  459. /* Build DMA transfer linked list */
  460. rt_uint32_t* llt_saddr = _k230_pdma_llt_cal(ch, ucfg);
  461. if (llt_saddr == RT_NULL)
  462. {
  463. LOG_E("[CH%d] Failed to allocate memory for linked list", ch);
  464. return -RT_ENOMEM;
  465. }
  466. /* Program linked list starting address */
  467. pdma_write32(&(pdma_ctrl.reg->pdma_ch_reg[ch].ch_llt_saddr), (rt_uint32_t)(rt_uintptr_t)llt_saddr);
  468. LOG_D("[CH%d] Linked list programmed (addr: 0x%p)", ch, llt_saddr);
  469. /* Mark channel as configured */
  470. pdma_ctrl.chan[ch].is_hw_configured = RT_TRUE;
  471. LOG_I("[CH%d] Configuration completed successfully", ch);
  472. return RT_EOK;
  473. }
  474. /**
  475. * @brief Safely stop a PDMA channel operation
  476. * @param ch Channel number to stop (0-PDMA_MAX_CHANNELS-1)
  477. * @param timeout_ms Maximum wait time in milliseconds (0 for no timeout)
  478. * @return RT_EOK on success, -RT_ETIMEOUT on timeout, other errors
  479. */
  480. static rt_err_t _k230_pdma_safe_stop(rt_uint8_t ch, rt_uint32_t timeout_ms)
  481. {
  482. rt_err_t err = RT_EOK;
  483. rt_tick_t start_tick;
  484. LOG_D("[CH%d] Attempting safe stop (timeout: %dms)", ch, timeout_ms);
  485. /* Immediately request channel stop */
  486. PDMA_CH_STOP(ch);
  487. /* Wait for channel to become inactive */
  488. start_tick = rt_tick_get();
  489. while (PDMA_CH_IS_BUSY(ch))
  490. {
  491. /* Check for timeout if specified */
  492. if (timeout_ms > 0 &&
  493. (rt_tick_get_delta(start_tick) >= rt_tick_from_millisecond(timeout_ms)))
  494. {
  495. LOG_E("[CH%d] Stop operation timed out", ch);
  496. return -RT_ETIMEOUT;
  497. }
  498. rt_thread_mdelay(1);
  499. }
  500. /* Enter critical section for register cleanup */
  501. rt_base_t level = rt_hw_interrupt_disable();
  502. PDMA_LOCK();
  503. /* Clear and disable all interrupts */
  504. PDMA_CH_INT_CLEAR_ALL(ch);
  505. PDMA_CH_INT_DISABLE_ALL(ch);
  506. LOG_D("[CH%d] Interrupts cleared and disabled", ch);
  507. PDMA_UNLOCK();
  508. rt_hw_interrupt_enable(level);
  509. /* Free linked list memory */
  510. _k230_pdma_llt_free(ch);
  511. LOG_D("[CH%d] Linked list memory freed", ch);
  512. pdma_ctrl.chan[ch].is_hw_configured = RT_FALSE;
  513. LOG_I("[CH%d] Successfully stopped", ch);
  514. return RT_EOK;
  515. }
  516. /**
  517. * @brief Calculate and allocate PDMA linked list table (LLT)
  518. * @param ch Channel number (0-PDMA_MAX_CHANNELS-1)
  519. * @param pdma_cfg Pointer to PDMA configuration structure
  520. * @return Physical address of LLT on success, RT_NULL on failure
  521. */
  522. static rt_uint32_t *_k230_pdma_llt_cal(rt_uint8_t ch, usr_pdma_cfg_t *pdma_cfg)
  523. {
  524. rt_int32_t i;
  525. rt_uint32_t list_num;
  526. pdma_llt_t *llt_list;
  527. rt_bool_t mem_to_dev;
  528. LOG_D("[CH%d] Calculating LLT parameters", ch);
  529. /* Calculate number of LLT entries needed */
  530. list_num = (pdma_cfg->line_size - 1) / PDMA_MAX_LINE_SIZE + 1;
  531. LOG_D("[CH%d] Line size: %d, requires %d LLT entries",
  532. ch, pdma_cfg->line_size, list_num);
  533. /* Determine transfer direction */
  534. mem_to_dev = (pdma_cfg->pdma_ch_cfg.ch_src_type == CONTINUE) ? RT_TRUE : RT_FALSE;
  535. LOG_D("[CH%d] Transfer direction: %s", ch, mem_to_dev ? "Memory->Device" : "Device->Memory");
  536. /* Allocate memory for LLT */
  537. pdma_ctrl.chan[ch].page_size = rt_page_bits(sizeof(pdma_llt_t) * list_num);
  538. llt_list = (pdma_llt_t *)rt_pages_alloc(pdma_ctrl.chan[ch].page_size);
  539. if (llt_list == RT_NULL)
  540. {
  541. pdma_ctrl.chan[ch].page_size = 0 ;
  542. LOG_E("[CH%d] Failed to allocate memory for LLT", ch);
  543. return RT_NULL;
  544. }
  545. LOG_D("[CH%d] Allocated %d bytes for LLT", ch, sizeof(pdma_llt_t) * list_num);
  546. pdma_ctrl.chan[ch].llt_va = llt_list;
  547. /* Initialize LLT entries */
  548. for (i = 0; i < list_num; i++)
  549. {
  550. /* Set source and destination addresses */
  551. if (mem_to_dev)
  552. {
  553. llt_list[i].src_addr = ((rt_uint32_t)(intptr_t)pdma_cfg->src_addr + PDMA_MAX_LINE_SIZE * i);
  554. llt_list[i].dst_addr = ((rt_uint32_t)(intptr_t)pdma_cfg->dst_addr); /* Device address remains fixed */
  555. }
  556. else
  557. {
  558. llt_list[i].src_addr = ((rt_uint32_t)(intptr_t)pdma_cfg->src_addr); /* Device address remains fixed */
  559. llt_list[i].dst_addr = ((rt_uint32_t)(intptr_t)pdma_cfg->dst_addr + PDMA_MAX_LINE_SIZE * i);
  560. }
  561. /* Set transfer size and next pointer */
  562. if (i == list_num - 1)
  563. {
  564. /* Last entry uses remaining size */
  565. llt_list[i].line_size = (pdma_cfg->line_size % PDMA_MAX_LINE_SIZE) ?
  566. (pdma_cfg->line_size % PDMA_MAX_LINE_SIZE) :
  567. PDMA_MAX_LINE_SIZE;
  568. llt_list[i].next_llt_addr = 0; /* Terminate list */
  569. LOG_D("[CH%d] Last LLT entry: size=%d", ch, llt_list[i].line_size);
  570. }
  571. else
  572. {
  573. llt_list[i].line_size = PDMA_MAX_LINE_SIZE;
  574. /* Convert virtual address of next entry to physical address */
  575. void *next_llt_va = &llt_list[i+1];
  576. llt_list[i].next_llt_addr = (rt_uint32_t)(intptr_t)rt_kmem_v2p(next_llt_va);
  577. }
  578. llt_list[i].pause = 0;
  579. }
  580. /* Handle cache coherency based on transfer direction */
  581. if (mem_to_dev)
  582. {
  583. /* Memory to Device: clean source data cache */
  584. void *src_va = rt_kmem_p2v(pdma_cfg->src_addr);
  585. rt_hw_cpu_dcache_clean(src_va, pdma_cfg->line_size);
  586. LOG_D("[CH%d] Cleaned source cache (va: %p, size: %d)",
  587. ch, src_va, pdma_cfg->line_size);
  588. }
  589. else
  590. {
  591. /* Device to Memory: invalidate destination cache */
  592. void *dst_va = rt_kmem_p2v(pdma_cfg->dst_addr);
  593. rt_hw_cpu_dcache_invalidate(dst_va, pdma_cfg->line_size);
  594. LOG_D("[CH%d] Invalidated destination cache (va: %p, size: %d)",
  595. ch, dst_va, pdma_cfg->line_size);
  596. }
  597. /* Ensure LLT is visible to DMA */
  598. rt_hw_cpu_dcache_clean((void*)llt_list, sizeof(pdma_llt_t) * list_num);
  599. LOG_D("[CH%d] Cleaned LLT cache (va: %p, size: %d)",
  600. ch, llt_list, sizeof(pdma_llt_t) * list_num);
  601. /* Return physical address of LLT */
  602. void *llt_list_pa = rt_kmem_v2p(llt_list);
  603. LOG_I("[CH%d] LLT calculation complete (pa: %p)", ch, llt_list_pa);
  604. return (rt_uint32_t *)llt_list_pa;
  605. }
  606. /**
  607. * @brief Free allocated PDMA linked list table (LLT) memory
  608. * @param ch Channel number (0-PDMA_MAX_CHANNELS-1) to free
  609. */
  610. static void _k230_pdma_llt_free(rt_uint8_t ch)
  611. {
  612. rt_uint32_t *llt_list_pa;
  613. void *llt_list_va;
  614. LOG_D("[CH%d] Freeing LLT memory", ch);
  615. if(pdma_ctrl.chan[ch].llt_va != RT_NULL)
  616. {
  617. /* Free the allocated pages */
  618. rt_pages_free(pdma_ctrl.chan[ch].llt_va, pdma_ctrl.chan[ch].page_size);
  619. pdma_ctrl.chan[ch].llt_va = 0;
  620. pdma_ctrl.chan[ch].page_size = 0;
  621. LOG_D("[CH%d] Freed %d bytes of LLT memory", ch,pdma_ctrl.chan[ch].page_size);
  622. }
  623. }
  624. /**
  625. * @brief PDMA interrupt service routine
  626. * @param irq Interrupt number (unused)
  627. * @param param Channel number passed as void pointer
  628. */
  629. static void k230_pdma_isr(int irq, void *param)
  630. {
  631. rt_uint8_t ch = (rt_uintptr_t)param; /* Convert channel parameter */
  632. rt_bool_t success = RT_FALSE; /* Transfer result flag */
  633. k230_pdma_callback_t callback = RT_NULL; /* Callback function pointer */
  634. LOG_D("[CH%d] PDMA interrupt triggered", ch);
  635. PDMA_LOCK();
  636. /* Only process interrupts for enabled channels */
  637. if (PDMA_CH_MENUCONFIG_ENABLED(ch) && PDMA_CH_IS_ENABLED(ch))
  638. {
  639. /* Check for transfer complete interrupt */
  640. if (PDMA_CH_INT_IS_TRIGGERED(ch, PDMA_PDONE_INT))
  641. {
  642. success = RT_TRUE;
  643. callback = pdma_ctrl.chan[ch].cb.callback;
  644. LOG_D("[CH%d] Transfer complete", ch);
  645. }
  646. /* Check for timeout interrupt */
  647. else if (PDMA_CH_INT_IS_TRIGGERED(ch, PDMA_PTOUT_INT))
  648. {
  649. success = RT_FALSE;
  650. callback = pdma_ctrl.chan[ch].cb.callback;
  651. LOG_E("[CH%d] Transfer timeout", ch);
  652. }
  653. /* Check for pause interrupt */
  654. else if (PDMA_CH_INT_IS_TRIGGERED(ch, PDMA_PPAUSE_INT))
  655. {
  656. PDMA_CH_RESUME(ch);
  657. LOG_D("[CH%d] Transfer resumed", ch);
  658. }
  659. /* Clear all interrupt flags for this channel */
  660. PDMA_CH_INT_CLEAR_ALL(ch);
  661. LOG_D("[CH%d] Interrupts cleared", ch);
  662. }
  663. PDMA_UNLOCK();
  664. if (callback)
  665. {
  666. callback(ch, success);
  667. }
  668. }
  669. /**
  670. * @brief Initialize PDMA hardware device
  671. * @return RT_EOK on success, error code on failure
  672. */
  673. int rt_hw_pdma_device_init(void)
  674. {
  675. LOG_I("Initializing PDMA controller");
  676. /* Map PDMA registers */
  677. pdma_ctrl.reg = rt_ioremap((void *)DMA_BASE_ADDR, DMA_IO_SIZE);
  678. if (RT_NULL == pdma_ctrl.reg)
  679. {
  680. LOG_E("Failed to map PDMA registers");
  681. return -RT_ERROR;
  682. }
  683. LOG_D("Mapped PDMA registers at 0x%08X", DMA_BASE_ADDR);
  684. if (kd_request_lock(HARDLOCK_PDMA))
  685. {
  686. pdma_ctrl.hardlock = -1;
  687. rt_iounmap(pdma_ctrl.reg);
  688. LOG_E("Failed to acquire PDMA hardware lock");
  689. return -RT_ERROR;
  690. }
  691. pdma_ctrl.hardlock = HARDLOCK_PDMA;
  692. LOG_D("Acquired PDMA hardware lock");
  693. /* Install and enable interrupts for configured channels */
  694. #if defined(BSP_USING_PDMA_CHANNEL0)
  695. pdma_ctrl.chan[PDMA_CH_0].menuconfig_enabled = RT_TRUE;
  696. pdma_ctrl.chan[PDMA_CH_0].irq_num = PDMA_CHANNEL0_IRQn;
  697. rt_hw_interrupt_install(PDMA_CHANNEL0_IRQn, k230_pdma_isr, (void *)PDMA_CH_0, "pdma_ch0");
  698. LOG_D("Enabled interrupts for channel 0");
  699. #endif
  700. #if defined(BSP_USING_PDMA_CHANNEL1)
  701. pdma_ctrl.chan[PDMA_CH_1].menuconfig_enabled = RT_TRUE;
  702. pdma_ctrl.chan[PDMA_CH_1].irq_num = PDMA_CHANNEL1_IRQn;
  703. rt_hw_interrupt_install(PDMA_CHANNEL1_IRQn, k230_pdma_isr, (void *)PDMA_CH_1, "pdma_ch1");
  704. LOG_D("Enabled interrupts for channel 1");
  705. #endif
  706. #if defined(BSP_USING_PDMA_CHANNEL2)
  707. pdma_ctrl.chan[PDMA_CH_2].menuconfig_enabled = RT_TRUE;
  708. pdma_ctrl.chan[PDMA_CH_2].irq_num = PDMA_CHANNEL2_IRQn;
  709. rt_hw_interrupt_install(PDMA_CHANNEL2_IRQn, k230_pdma_isr, (void *)PDMA_CH_2, "pdma_ch2");
  710. LOG_D("Enabled interrupts for channel 2");
  711. #endif
  712. #if defined(BSP_USING_PDMA_CHANNEL3)
  713. pdma_ctrl.chan[PDMA_CH_3].menuconfig_enabled = RT_TRUE;
  714. pdma_ctrl.chan[PDMA_CH_3].irq_num = PDMA_CHANNEL3_IRQn;
  715. rt_hw_interrupt_install(PDMA_CHANNEL3_IRQn, k230_pdma_isr, (void *)PDMA_CH_3, "pdma_ch3");
  716. LOG_D("Enabled interrupts for channel 3");
  717. #endif
  718. #if defined(BSP_USING_PDMA_CHANNEL4)
  719. pdma_ctrl.chan[PDMA_CH_4].menuconfig_enabled = RT_TRUE;
  720. pdma_ctrl.chan[PDMA_CH_4].irq_num = PDMA_CHANNEL4_IRQn;
  721. rt_hw_interrupt_install(PDMA_CHANNEL4_IRQn, k230_pdma_isr, (void *)PDMA_CH_4, "pdma_ch4");
  722. LOG_D("Enabled interrupts for channel 4");
  723. #endif
  724. #if defined(BSP_USING_PDMA_CHANNEL5)
  725. pdma_ctrl.chan[PDMA_CH_5].menuconfig_enabled = RT_TRUE;
  726. pdma_ctrl.chan[PDMA_CH_5].irq_num = PDMA_CHANNEL5_IRQn;
  727. rt_hw_interrupt_install(PDMA_CHANNEL5_IRQn, k230_pdma_isr, (void *)PDMA_CH_5, "pdma_ch5");
  728. LOG_D("Enabled interrupts for channel 5");
  729. #endif
  730. #if defined(BSP_USING_PDMA_CHANNEL6)
  731. pdma_ctrl.chan[PDMA_CH_6].menuconfig_enabled = RT_TRUE;
  732. pdma_ctrl.chan[PDMA_CH_6].irq_num = PDMA_CHANNEL6_IRQn;
  733. rt_hw_interrupt_install(PDMA_CHANNEL6_IRQn, k230_pdma_isr, (void *)PDMA_CH_6, "pdma_ch6");
  734. LOG_D("Enabled interrupts for channel 6");
  735. #endif
  736. #if defined(BSP_USING_PDMA_CHANNEL7)
  737. pdma_ctrl.chan[PDMA_CH_7].menuconfig_enabled = RT_TRUE;
  738. pdma_ctrl.chan[PDMA_CH_7].irq_num = PDMA_CHANNEL7_IRQn;
  739. rt_hw_interrupt_install(PDMA_CHANNEL7_IRQn, k230_pdma_isr, (void *)PDMA_CH_7, "pdma_ch7");
  740. LOG_D("Enabled interrupts for channel 7");
  741. #endif
  742. return RT_EOK;
  743. }
  744. INIT_BOARD_EXPORT(rt_hw_pdma_device_init);