/*
 * Copyright (c) 2006-2025, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-11-03     mazhiyuan    first version
 */
#include <drv_sdhi.h>

#define RTHW_SDIO_LOCK(_sdio)   rt_mutex_take(&_sdio->mutex, RT_WAITING_FOREVER)
#define RTHW_SDIO_UNLOCK(_sdio) rt_mutex_release(&_sdio->mutex);
/* Per-controller driver context, stored in rt_mmcsd_host::private_data. */
struct rthw_sdio
{
    struct rt_mmcsd_host *host;   /* mmcsd host bound to this controller */
    struct ra_sdhi sdhi_des;      /* copy of the FSP SDHI instance descriptor */
    struct rt_event event;        /* signalled from the SDHI interrupt callback */
    struct rt_mutex mutex;        /* serializes requests on this host */
    rt_uint8_t *cache_buf;        /* bounce buffer used when data->buf is not SDIO_ALIGN_LEN aligned */
};
#ifdef BSP_USING_SDHI0
static struct rt_mmcsd_host *host0;
#endif
#ifdef BSP_USING_SDHI1
static struct rt_mmcsd_host *host1;
#endif

rt_align(SDIO_ALIGN_LEN)
static rt_uint8_t cache_buf1[SDIO_BUFF_SIZE], cache_buf2[SDIO_BUFF_SIZE];
  29. rt_err_t command_send(sdhi_instance_ctrl_t *p_ctrl, struct rt_mmcsd_cmd *cmd)
  30. {
  31. uint32_t wait_bit;
  32. uint32_t timeout = BUSY_TIMEOUT_US;
  33. volatile sdhi_event_t event;
  34. struct rt_mmcsd_data *data = cmd->data;
  35. while (SD_INFO2_CBSY_SDD0MON_IDLE_VAL !=
  36. (p_ctrl->p_reg->SD_INFO2 & SD_INFO2_CBSY_SDD0MON_IDLE_MASK))
  37. {
  38. if (timeout == 0)
  39. {
  40. return -RT_ETIMEOUT;
  41. }
  42. R_BSP_SoftwareDelay(1U, BSP_DELAY_UNITS_MICROSECONDS);
  43. timeout--;
  44. }
  45. p_ctrl->p_reg->SD_INFO1 = 0U;
  46. p_ctrl->p_reg->SD_INFO2 = 0U;
  47. p_ctrl->sdhi_event.word = 0U;
  48. /* Enable response end interrupt. */
  49. /* Disable access end interrupt and enable response end interrupt. */
  50. uint32_t mask = p_ctrl->p_reg->SD_INFO1_MASK;
  51. mask &= (~SDHI_INFO1_RESPONSE_END);
  52. mask |= SDHI_INFO1_ACCESS_END;
  53. p_ctrl->p_reg->SD_INFO1_MASK = mask;
  54. p_ctrl->p_reg->SD_INFO2_MASK = SDHI_INFO2_MASK_CMD_SEND;
  55. /* Write argument, then command to the SDHI peripheral. */
  56. p_ctrl->p_reg->SD_ARG = cmd->arg & UINT16_MAX;
  57. p_ctrl->p_reg->SD_ARG1 = cmd->arg >> 16;
  58. if ((cmd->flags & CMD_MASK) == CMD_ADTC)
  59. {
  60. cmd->cmd_code |= SDHI_CMD_ADTC_EN;
  61. switch (cmd->flags & RESP_MASK)
  62. {
  63. case RESP_R1:
  64. case RESP_R5:
  65. case RESP_R6:
  66. case RESP_R7:
  67. cmd->cmd_code |= SDHI_CMD_RESP_TYPE_EXT_R1_R5_R6_R7;
  68. break;
  69. case RESP_R1B:
  70. cmd->cmd_code |= SDHI_CMD_RESP_TYPE_EXT_R1B;
  71. break;
  72. case RESP_R2:
  73. cmd->cmd_code |= SDHI_CMD_RESP_TYPE_EXT_R2;
  74. break;
  75. case RESP_R3:
  76. case RESP_R4:
  77. cmd->cmd_code |= SDHI_CMD_RESP_TYPE_EXT_R3_R4;
  78. break;
  79. case RESP_NONE:
  80. cmd->cmd_code |= SDHI_CMD_RESP_TYPE_EXT_NONE;
  81. break;
  82. }
  83. if (data != RT_NULL)
  84. {
  85. if ((data->flags & 7) == DATA_DIR_WRITE)
  86. {
  87. cmd->cmd_code &= ~SDHI_CMD_DATA_DIR_READ;
  88. }
  89. else if ((data->flags & 7) == DATA_DIR_READ)
  90. {
  91. cmd->cmd_code |= SDHI_CMD_DATA_DIR_READ;
  92. }
  93. }
  94. if (data->blks > 1)
  95. {
  96. cmd->cmd_code |= SDHI_BLK_TRANSFER;
  97. cmd->cmd_code |= SDHI_BLK_NOT_AUTO_STOP;
  98. }
  99. }
  100. p_ctrl->p_reg->SD_CMD = cmd->cmd_code;
  101. timeout = 100000;
  102. while (true)
  103. {
  104. /* Check for updates to the event status. */
  105. event.word = p_ctrl->sdhi_event.word;
  106. /* Return an error if a hardware error occurred. */
  107. if (event.bit.event_error)
  108. {
  109. cmd->err = -RT_ERROR;
  110. if ((event.word & HW_SDHI_ERR_CRCE) && (resp_type(cmd) & (RESP_R3 | RESP_R4)))
  111. {
  112. if ((cmd->flags & RESP_MASK) == RESP_R2)
  113. {
  114. cmd->resp[0] = (p_ctrl->p_reg->SD_RSP76 << 8) | (p_ctrl->p_reg->SD_RSP54 >> 24);
  115. cmd->resp[1] = (p_ctrl->p_reg->SD_RSP54 << 8) | (p_ctrl->p_reg->SD_RSP32 >> 24);
  116. cmd->resp[2] = (p_ctrl->p_reg->SD_RSP32 << 8) | (p_ctrl->p_reg->SD_RSP10 >> 24);
  117. cmd->resp[3] = (p_ctrl->p_reg->SD_RSP10 << 8);
  118. }
  119. else
  120. {
  121. cmd->resp[0] = p_ctrl->p_reg->SD_RSP10;
  122. }
  123. cmd->err = RT_EOK;
  124. }
  125. if (event.word & HW_SDHI_ERR_RTIMEOUT)
  126. {
  127. cmd->err = -RT_ETIMEOUT;
  128. }
  129. if (event.word & HW_SDHI_ERR_DTIMEOUT)
  130. {
  131. data->err = -RT_ETIMEOUT;
  132. }
  133. return -RT_ERROR;
  134. }
  135. if (data != RT_NULL)
  136. {
  137. wait_bit = SDHI_WAIT_ACCESS_BIT;
  138. }
  139. else
  140. {
  141. wait_bit = SDHI_WAIT_RESPONSE_BIT;
  142. }
  143. /* If the requested bit is set, return success. */
  144. if (event.word & (1U << wait_bit))
  145. {
  146. cmd->err = RT_EOK;
  147. if ((cmd->flags & RESP_MASK) == RESP_R2)
  148. {
  149. cmd->resp[0] = (p_ctrl->p_reg->SD_RSP76 << 8) | (p_ctrl->p_reg->SD_RSP54 >> 24);
  150. cmd->resp[1] = (p_ctrl->p_reg->SD_RSP54 << 8) | (p_ctrl->p_reg->SD_RSP32 >> 24);
  151. cmd->resp[2] = (p_ctrl->p_reg->SD_RSP32 << 8) | (p_ctrl->p_reg->SD_RSP10 >> 24);
  152. cmd->resp[3] = (p_ctrl->p_reg->SD_RSP10 << 8);
  153. }
  154. else
  155. {
  156. cmd->resp[0] = p_ctrl->p_reg->SD_RSP10;
  157. }
  158. return RT_EOK;
  159. }
  160. /* Check for timeout. */
  161. timeout--;
  162. if (0U == timeout)
  163. {
  164. cmd->err = -RT_ETIMEOUT;
  165. return -RT_ERROR;
  166. }
  167. /* Wait 1 us for consistent loop timing. */
  168. R_BSP_SoftwareDelay(1U, BSP_DELAY_UNITS_MICROSECONDS);
  169. }
  170. }
  171. rt_err_t transfer_write(sdhi_instance_ctrl_t *const p_ctrl,
  172. uint32_t block_count,
  173. uint32_t bytes,
  174. const uint8_t *p_data)
  175. {
  176. transfer_info_t *p_info = p_ctrl->p_cfg->p_lower_lvl_transfer->p_cfg->p_info;
  177. /* When the SD_DMAEN.DMAEN bit is 1, set the SD_INFO2_MASK.BWEM bit to 1 and the SD_INFO2_MASK.BREM bit to 1. */
  178. p_ctrl->p_reg->SD_INFO2_MASK |= 0x300U;
  179. p_ctrl->p_reg->SD_DMAEN = 0x2U;
  180. uint32_t transfer_settings = (uint32_t)TRANSFER_MODE_BLOCK << TRANSFER_SETTINGS_MODE_BITS;
  181. transfer_settings |= TRANSFER_ADDR_MODE_INCREMENTED << TRANSFER_SETTINGS_SRC_ADDR_BITS;
  182. transfer_settings |= TRANSFER_SIZE_4_BYTE << TRANSFER_SETTINGS_SIZE_BITS;
  183. #if SDMMC_CFG_UNALIGNED_ACCESS_ENABLE
  184. if ((0U != ((uint32_t)p_data & 0x3U)) || (0U != (bytes & 3U)))
  185. {
  186. transfer_settings |= TRANSFER_IRQ_EACH << TRANSFER_SETTINGS_IRQ_BITS;
  187. transfer_settings |= TRANSFER_REPEAT_AREA_SOURCE << TRANSFER_SETTINGS_REPEAT_AREA_BITS;
  188. /* If the pointer is not 4-byte aligned or the number of bytes is not a multiple of 4, use a temporary buffer.
  189. * Transfer the first block to the temporary buffer before enabling the transfer. Subsequent blocks will be
  190. * transferred from the user buffer to the temporary buffer in an interrupt after each block transfer. */
  191. rt_memcpy((void *)&p_ctrl->aligned_buff[0], p_data, bytes);
  192. p_info->p_src = &p_ctrl->aligned_buff[0];
  193. p_ctrl->transfer_block_current = 1U;
  194. p_ctrl->transfer_blocks_total = block_count;
  195. p_ctrl->p_transfer_data = (uint8_t *)&p_data[bytes];
  196. p_ctrl->transfer_dir = SDHI_TRANSFER_DIR_WRITE;
  197. p_ctrl->transfer_block_size = bytes;
  198. }
  199. else
  200. #endif
  201. {
  202. p_info->p_src = p_data;
  203. }
  204. p_info->transfer_settings_word = transfer_settings;
  205. p_info->p_dest = (uint32_t *)(&p_ctrl->p_reg->SD_BUF0);
  206. p_info->num_blocks = (uint16_t)block_count;
  207. /* Round up to the nearest multiple of 4 bytes for the transfer. */
  208. uint32_t words = (bytes + (sizeof(uint32_t) - 1U)) / sizeof(uint32_t);
  209. p_info->length = (uint16_t)words;
  210. /* Configure the transfer driver to write to the SD buffer. */
  211. fsp_err_t err = p_ctrl->p_cfg->p_lower_lvl_transfer->p_api->reconfigure(p_ctrl->p_cfg->p_lower_lvl_transfer->p_ctrl,
  212. p_ctrl->p_cfg->p_lower_lvl_transfer->p_cfg->p_info);
  213. if (FSP_SUCCESS != err)
  214. return -RT_ERROR;
  215. return RT_EOK;
  216. }
  217. rt_err_t transfer_read(sdhi_instance_ctrl_t *const p_ctrl,
  218. uint32_t block_count,
  219. uint32_t bytes,
  220. void *p_data)
  221. {
  222. transfer_info_t *p_info = p_ctrl->p_cfg->p_lower_lvl_transfer->p_cfg->p_info;
  223. /* When the SD_DMAEN.DMAEN bit is 1, set the SD_INFO2_MASK.BWEM bit to 1 and the SD_INFO2_MASK.BREM bit to 1. */
  224. p_ctrl->p_reg->SD_INFO2_MASK |= 0X300U;
  225. p_ctrl->p_reg->SD_DMAEN = 0x2U;
  226. uint32_t transfer_settings = (uint32_t)TRANSFER_MODE_BLOCK << TRANSFER_SETTINGS_MODE_BITS;
  227. transfer_settings |= TRANSFER_ADDR_MODE_INCREMENTED << TRANSFER_SETTINGS_DEST_ADDR_BITS;
  228. transfer_settings |= TRANSFER_SIZE_4_BYTE << TRANSFER_SETTINGS_SIZE_BITS;
  229. #if SDMMC_CFG_UNALIGNED_ACCESS_ENABLE
  230. /* If the pointer is not 4-byte aligned or the number of bytes is not a multiple of 4, use a temporary buffer.
  231. * Data will be transferred from the temporary buffer into the user buffer in an interrupt after each block transfer. */
  232. if ((0U != ((uint32_t)p_data & 0x3U)) || (0U != (bytes & 3U)))
  233. {
  234. transfer_settings |= TRANSFER_IRQ_EACH << TRANSFER_SETTINGS_IRQ_BITS;
  235. p_info->p_dest = &p_ctrl->aligned_buff[0];
  236. p_ctrl->transfer_block_current = 0U;
  237. p_ctrl->transfer_blocks_total = block_count;
  238. p_ctrl->p_transfer_data = (uint8_t *)p_data;
  239. p_ctrl->transfer_dir = SDHI_TRANSFER_DIR_READ;
  240. p_ctrl->transfer_block_size = bytes;
  241. }
  242. else
  243. #endif
  244. {
  245. transfer_settings |= TRANSFER_REPEAT_AREA_SOURCE << TRANSFER_SETTINGS_REPEAT_AREA_BITS;
  246. p_info->p_dest = p_data;
  247. }
  248. p_info->transfer_settings_word = transfer_settings;
  249. p_info->p_src = (uint32_t *)(&p_ctrl->p_reg->SD_BUF0);
  250. p_info->num_blocks = (uint16_t)block_count;
  251. /* Round up to the nearest multiple of 4 bytes for the transfer. */
  252. uint32_t words = (bytes + (sizeof(uint32_t) - 1U)) / sizeof(uint32_t);
  253. p_info->length = (uint16_t)words;
  254. /* Configure the transfer driver to read from the SD buffer. */
  255. fsp_err_t err = p_ctrl->p_cfg->p_lower_lvl_transfer->p_api->reconfigure(p_ctrl->p_cfg->p_lower_lvl_transfer->p_ctrl,
  256. p_ctrl->p_cfg->p_lower_lvl_transfer->p_cfg->p_info);
  257. if (err != FSP_SUCCESS)
  258. return -RT_ERROR;
  259. return RT_EOK;
  260. }
  261. void ra_sdhi_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
  262. {
  263. struct rthw_sdio *sdio = host->private_data;
  264. struct rt_mmcsd_data *data;
  265. static rt_uint8_t *buffer;
  266. RTHW_SDIO_LOCK(sdio);
  267. if (req->cmd != RT_NULL)
  268. {
  269. data = req->cmd->data;
  270. if (data != RT_NULL)
  271. {
  272. rt_uint32_t size = data->blks * data->blksize;
  273. RT_ASSERT(size <= SDIO_BUFF_SIZE);
  274. /* 使用 SDIO WiFi(cyw43438) 时不使用数据流模式 */
  275. #if defined(SOC_SERIES_R7KA8P1)
  276. if (data->flags & DATA_STREAM)
  277. {
  278. data->flags &= ~DATA_STREAM;
  279. }
  280. #endif
  281. buffer = (rt_uint8_t *)data->buf;
  282. if ((rt_uint32_t)data->buf & (SDIO_ALIGN_LEN - 1))
  283. {
  284. buffer = sdio->cache_buf;
  285. if (data->flags & DATA_DIR_WRITE)
  286. {
  287. rt_memcpy(sdio->cache_buf, data->buf, size);
  288. }
  289. }
  290. if (data->flags & DATA_DIR_WRITE)
  291. {
  292. transfer_write(sdio->sdhi_des.instance->p_ctrl, data->blks, data->blksize, buffer);
  293. }
  294. else if (data->flags & DATA_DIR_READ)
  295. {
  296. transfer_read(sdio->sdhi_des.instance->p_ctrl, data->blks, data->blksize, buffer);
  297. #if defined(__DCACHE_PRESENT)
  298. SCB_CleanInvalidateDCache();
  299. #endif
  300. }
  301. /* Set the sector count. */
  302. if (data->blks > 1U)
  303. {
  304. ((sdhi_instance_ctrl_t *)sdio->sdhi_des.instance->p_ctrl)->p_reg->SD_STOP = 0x100U;
  305. ((sdhi_instance_ctrl_t *)sdio->sdhi_des.instance->p_ctrl)->p_reg->SD_SECCNT = data->blks;
  306. }
  307. else
  308. {
  309. ((sdhi_instance_ctrl_t *)sdio->sdhi_des.instance->p_ctrl)->p_reg->SD_STOP = 0U;
  310. }
  311. ((sdhi_instance_ctrl_t *)sdio->sdhi_des.instance->p_ctrl)->p_reg->SD_SIZE = data->blksize;
  312. }
  313. rt_enter_critical();
  314. command_send(sdio->sdhi_des.instance->p_ctrl, req->cmd);
  315. rt_exit_critical();
  316. if ((data != RT_NULL) && (data->flags & DATA_DIR_READ) && ((rt_uint32_t)data->buf & (SDIO_ALIGN_LEN - 1)))
  317. {
  318. #if defined(__DCACHE_PRESENT)
  319. SCB_CleanInvalidateDCache_by_Addr((uint32_t*)((uint32_t)sdio->cache_buf & ~(32U - 1U)), data->blks * data->blksize + 32U);
  320. #endif
  321. rt_memcpy(data->buf, sdio->cache_buf, data->blksize * data->blks);
  322. }
  323. }
  324. if (req->stop != RT_NULL)
  325. {
  326. rt_enter_critical();
  327. command_send(sdio->sdhi_des.instance->p_ctrl, req->stop);
  328. rt_exit_critical();
  329. }
  330. RTHW_SDIO_UNLOCK(sdio);
  331. mmcsd_req_complete(sdio->host);
  332. }
/**
 * Select the largest SD clock that does not exceed max_rate and enable clock
 * output.
 *
 * @param p_ctrl    FSP SDHI instance control block.
 * @param max_rate  Maximum acceptable clock rate in Hz.
 * @return RT_EOK when the divider was programmed, -RT_ERROR if no divider fits
 *         or the clock control register never became writable.
 */
static rt_err_t clock_rate_set(sdhi_instance_ctrl_t *p_ctrl, uint32_t max_rate)
{
    uint32_t setting = 0xFFU;
    /* Get the runtime frequency of the source of the SD clock. */
    uint32_t frequency = R_FSP_SystemClockHzGet(BSP_FEATURE_SDHI_CLOCK);
    /* Iterate over all possible divisors, starting with the smallest, until the resulting clock rate is less than
     * or equal to the requested maximum rate. */
    for (uint32_t divisor_shift = BSP_FEATURE_SDHI_MIN_CLOCK_DIVISION_SHIFT;
         divisor_shift <= 9U;
         divisor_shift++)
    {
        if ((frequency >> divisor_shift) <= max_rate)
        {
            /* If the calculated frequency is less than or equal to the maximum supported by the device,
             * select this frequency. The register setting is the divisor value divided by 4, or 0xFF for no divider. */
            setting = divisor_shift ? ((1U << divisor_shift) >> 2U) : UINT8_MAX;
            /* The clock register is accessible 8 SD clock counts after the last command completes. Each register access
             * requires at least one PCLK count, so check the register up to 8 times the maximum PCLK divisor value (512). */
            uint32_t timeout = 8U * 512U;
            while (timeout > 0U)
            {
                /* Do not write to the clock control register until SD_CLK_CTRLEN is set. */
                if (p_ctrl->p_reg->SD_INFO2_b.SD_CLK_CTRLEN)
                {
                    /* Set the calculated divider and enable clock output to start the 74 clocks required before
                     * initialization. Do not change the automatic clock control setting. */
                    uint32_t clkctrlen = p_ctrl->p_reg->SD_CLK_CTRL & (1U << 9);
#if defined(SOC_SERIES_R7KA8P1)
                    /* NOTE(review): this SoC variant forces the maximum divider and drops
                     * `setting`/`clkctrlen` (both then unused) — presumably a board-specific
                     * workaround; confirm against the BSP. */
                    p_ctrl->p_reg->SD_CLK_CTRL = 0x0100 | 0x00FF;
#else
                    p_ctrl->p_reg->SD_CLK_CTRL = setting | clkctrlen | (1U << 8);
#endif
                    p_ctrl->device.clock_rate = frequency >> divisor_shift;
                    return RT_EOK;
                }
                timeout--;
            }
            /* Valid setting already found, stop looking. */
            break;
        }
    }
    return -RT_ERROR;
}
  377. void ra_sdhi_set_iocfg(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg)
  378. {
  379. struct rthw_sdio *sdio = host->private_data;
  380. RTHW_SDIO_LOCK(sdio);
  381. if (io_cfg->bus_width == MMCSD_BUS_WIDTH_1)
  382. {
  383. ((sdhi_instance_ctrl_t *)sdio->sdhi_des.instance->p_ctrl)->p_reg->SD_OPTION_b.WIDTH = 1;
  384. }
  385. else if (io_cfg->bus_width == MMCSD_BUS_WIDTH_4)
  386. {
  387. ((sdhi_instance_ctrl_t *)sdio->sdhi_des.instance->p_ctrl)->p_reg->SD_OPTION_b.WIDTH = 0;
  388. ((sdhi_instance_ctrl_t *)sdio->sdhi_des.instance->p_ctrl)->p_reg->SD_OPTION_b.WIDTH8 = 0;
  389. }
  390. else if (io_cfg->bus_width == MMCSD_BUS_WIDTH_8)
  391. {
  392. ((sdhi_instance_ctrl_t *)sdio->sdhi_des.instance->p_ctrl)->p_reg->SD_OPTION_b.WIDTH = 0;
  393. ((sdhi_instance_ctrl_t *)sdio->sdhi_des.instance->p_ctrl)->p_reg->SD_OPTION_b.WIDTH8 = 1;
  394. }
  395. clock_rate_set(sdio->sdhi_des.instance->p_ctrl, io_cfg->clock);
  396. RTHW_SDIO_UNLOCK(sdio);
  397. }
  398. rt_int32_t ra_sdhi_get_card_status(struct rt_mmcsd_host *host)
  399. {
  400. sdmmc_status_t status;
  401. struct rthw_sdio *sdio = host->private_data;
  402. sdio->sdhi_des.instance->p_api->statusGet(sdio->sdhi_des.instance->p_ctrl, &status);
  403. return status.card_inserted;
  404. }
  405. void ra_sdhi_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t en)
  406. {
  407. struct rthw_sdio *sdio = host->private_data;
  408. sdio->sdhi_des.instance->p_api->ioIntEnable(sdio->sdhi_des.instance->p_ctrl, en);
  409. }
/* Host operation table registered with the RT-Thread mmcsd core. */
struct rt_mmcsd_host_ops ra_sdhi_ops =
{
    .request = ra_sdhi_request,                  /* execute one command/data request */
    .set_iocfg = ra_sdhi_set_iocfg,              /* apply bus width and clock */
    .get_card_status = ra_sdhi_get_card_status,  /* card-detect query */
    .enable_sdio_irq = ra_sdhi_enable_sdio_irq   /* SDIO interrupt gate */
};
  417. /**
  418. * @brief This function interrupt process function.
  419. * @param host rt_mmcsd_host
  420. * @retval None
  421. */
  422. void rthw_sdio_irq_process(struct rt_mmcsd_host *host)
  423. {
  424. struct rthw_sdio *sdio = host->private_data;
  425. rt_event_send(&sdio->event, 0xffffffff);
  426. }
  427. #ifdef BSP_USING_SDHI0
  428. void sdhi0_callback(sdmmc_callback_args_t *p_args)
  429. {
  430. /* enter interrupt */
  431. rt_interrupt_enter();
  432. if ((SDMMC_EVENT_ERASE_COMPLETE | SDMMC_EVENT_TRANSFER_COMPLETE) & p_args->event)
  433. {
  434. /* Process All SDIO Interrupt Sources */
  435. rthw_sdio_irq_process(host0);
  436. }
  437. /* leave interrupt */
  438. rt_interrupt_leave();
  439. }
  440. #endif
  441. #ifdef BSP_USING_SDHI1
  442. void sdhi1_callback(sdmmc_callback_args_t *p_args)
  443. {
  444. /* enter interrupt */
  445. rt_interrupt_enter();
  446. if ((SDMMC_EVENT_ERASE_COMPLETE | SDMMC_EVENT_TRANSFER_COMPLETE) & p_args->event)
  447. {
  448. /* Process All SDIO Interrupt Sources */
  449. rthw_sdio_irq_process(host1);
  450. }
  451. /* leave interrupt */
  452. rt_interrupt_leave();
  453. }
  454. #endif
  455. struct rt_mmcsd_host *sdio_host_create(struct ra_sdhi *sdhi_des, rt_uint8_t cache_buf[SDIO_BUFF_SIZE])
  456. {
  457. struct rt_mmcsd_host *host;
  458. struct rthw_sdio *sdio = RT_NULL;
  459. if (sdhi_des == RT_NULL)
  460. return RT_NULL;
  461. sdio = rt_malloc(sizeof(struct rthw_sdio));
  462. if (sdio == RT_NULL)
  463. return RT_NULL;
  464. rt_memset(sdio, 0, sizeof(struct rthw_sdio));
  465. sdio->cache_buf = cache_buf;
  466. host = mmcsd_alloc_host();
  467. if (host == RT_NULL)
  468. {
  469. rt_free(sdio);
  470. return RT_NULL;
  471. }
  472. rt_memcpy(&sdio->sdhi_des, sdhi_des, sizeof(struct ra_sdhi));
  473. rt_event_init(&sdio->event, "sdio", RT_IPC_FLAG_FIFO);
  474. rt_mutex_init(&sdio->mutex, "sdio", RT_IPC_FLAG_PRIO);
  475. /* set host default attributes */
  476. host->ops = &ra_sdhi_ops;
  477. host->freq_min = 400 * 1000;
  478. host->freq_max = SDIO_MAX_FREQ;
  479. host->valid_ocr = 0X00FFFF80; /* The voltage range supported is 1.65v-3.6v */
  480. #ifndef SDHI_USING_1_BIT
  481. host->flags = MMCSD_BUSWIDTH_4 | MMCSD_MUTBLKWRITE | MMCSD_SUP_SDIO_IRQ | MMCSD_SUP_HIGHSPEED;
  482. #else
  483. host->flags = MMCSD_MUTBLKWRITE | MMCSD_SUP_SDIO_IRQ | MMCSD_SUP_HIGHSPEED;
  484. #endif
  485. host->max_seg_size = SDIO_BUFF_SIZE;
  486. host->max_dma_segs = 1;
  487. host->max_blk_size = 512;
  488. host->max_blk_count = 512;
  489. /* link up host and sdio */
  490. sdio->host = host;
  491. host->private_data = sdio;
  492. ra_sdhi_enable_sdio_irq(host, 1);
  493. /* ready to change */
  494. #if defined (SOC_SERIES_R7KA8P1)
  495. mmcsd_change(host);
  496. #endif
  497. return host;
  498. }
  499. int rt_hw_sdhi_init(void)
  500. {
  501. #if defined(BSP_USING_SDHI0)
  502. struct ra_sdhi sdhi0;
  503. sdhi0.instance = &g_sdmmc0;
  504. #ifndef SDHI_USING_1_BIT
  505. sdhi0.bus_width = MMCSD_BUS_WIDTH_4;
  506. #else
  507. sdhi0.bus_width = MMCSD_BUS_WIDTH_1;
  508. #endif
  509. sdhi0.instance->p_api->open(sdhi0.instance->p_ctrl, sdhi0.instance->p_cfg);
  510. host0 = sdio_host_create(&sdhi0, cache_buf1);
  511. if (host0 == RT_NULL)
  512. {
  513. return -1;
  514. }
  515. #endif
  516. #if defined(BSP_USING_SDHI1)
  517. struct ra_sdhi sdhi1;
  518. sdhi1.instance = &g_sdmmc1;
  519. #ifndef SDHI_USING_1_BIT
  520. sdhi1.bus_width = MMCSD_BUS_WIDTH_4;
  521. #else
  522. sdhi1.bus_width = MMCSD_BUS_WIDTH_1;
  523. #endif
  524. sdhi1.instance->p_api->open(sdhi1.instance->p_ctrl, sdhi1.instance->p_cfg);
  525. host1 = sdio_host_create(&sdhi1, cache_buf2);
  526. if (host1 == RT_NULL)
  527. {
  528. return -1;
  529. }
  530. #endif
  531. return 0;
  532. }
  533. INIT_DEVICE_EXPORT(rt_hw_sdhi_init);
/* Notify the mmcsd core of a card insertion/removal. Which host gets the
 * notification is fixed per SoC series; presumably this matches the channel
 * wired to the card-detect line on each reference board — TODO confirm. */
void sdcard_change(void)
{
#if (defined(SOC_SERIES_R7KA8P1) || defined(SOC_SERIES_R7FA6M4)) && defined(BSP_USING_SDHI0)
    mmcsd_change(host0);
#elif (defined(SOC_SERIES_R7FA6M3) || defined(SOC_SERIES_R7FA8M85)) && defined(BSP_USING_SDHI1)
    mmcsd_change(host1);
#endif
}