drv_sdio.c

/*
 * Copyright (c) 2022-2025 HPMicro
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Change Logs:
 * Date         Author   Notes
 * 2022-02-23   HPMicro  First version
 * 2022-07-19   HPMicro  Fixed the multi-block read/write issue
 * 2023-07-27   HPMicro  Fixed clock setting issue
 * 2023-08-02   HPMicro  Added speed mode setting
 * 2024-01-03   HPMicro  Added multiple instance support
 * 2024-05-23   HPMicro  Fixed unaligned transfer issue in the SDIO case
 * 2024-05-25   HPMicro  Added HS200 & HS400 support, optimized the cache-management policy for read
 * 2024-05-26   HPMicro  Added UHS-I support, added DDR50 and High Speed DDR mode support
 * 2024-06-19   HPMicro  Added timeout check for SDXC transfer
 * 2025-03-06   HPMicro  Adapted to hpm-sdk v1.9.0
 * 2025-03-24   HPMicro  Added ADMA3 support, added interrupt-driven mode
 * 2025-04-11   HPMicro  Added non-cacheable buffer support, avoided dynamic memory allocation at the transfer stage
 */
#include <rtthread.h>

#ifdef BSP_USING_SDXC

#include <rtdbg.h>
#include <rtdevice.h>
#include "board.h"
#include "hpm_sdxc_drv.h"
#include "hpm_l1c_drv.h"

#define CACHELINE_SIZE                 HPM_L1C_CACHELINE_SIZE
#define SDXC_ADMA_TABLE_WORDS          SDXC_AMDA3_DESC_MIN_WORDS
#define SDXC_AMDA_ADDR_ALIGNMENT       (4U)
#define SDXC_ADMA_XFER_SIZE_ALIGNMENT  (4U)
#define SDXC_DATA_TIMEOUT              (1000) /* 1000 ms */
#define SDMMC_DEFAULT_SECTOR_SIZE      (512U)

#define SDXC_CACHELINE_ALIGN_DOWN(x)   HPM_L1C_CACHELINE_ALIGN_DOWN(x)
#define SDXC_CACHELINE_ALIGN_UP(x)     HPM_L1C_CACHELINE_ALIGN_UP(x)
#define SDXC_IS_CACHELINE_ALIGNED(n)   ((uint32_t)(n) % (uint32_t)(CACHELINE_SIZE) == 0U)
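
/*
 * Illustrative note: with a 64-byte L1 cache line (the typical value of
 * HPM_L1C_CACHELINE_SIZE on HPMicro parts), the helpers above behave as:
 *
 *   SDXC_CACHELINE_ALIGN_UP(513)   -> 576  (next 64-byte boundary)
 *   SDXC_CACHELINE_ALIGN_DOWN(513) -> 512  (previous 64-byte boundary)
 *   SDXC_IS_CACHELINE_ALIGNED(512) -> true
 *
 * The transfer path below uses these to decide whether a request buffer can
 * be handed to the DMA engine directly or must be bounced through data_buf.
 */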
struct hpm_mmcsd
{
    struct rt_mmcsd_host *host;
    struct rt_mmcsd_req *req;
    struct rt_mmcsd_cmd *cmd;
    struct rt_timer *timer;
    char name[RT_NAME_MAX];
    rt_uint32_t *buf;
    SDXC_Type *sdxc_base;
    int32_t irq_num;
    uint32_t *sdxc_adma_table;

    bool support_8bit;
    bool support_4bit;
    bool support_1v8;
    bool support_3v3;

    uint8_t power_mode;
    uint8_t bus_width;
    uint8_t timing;
    uint8_t bus_mode;
    uint32_t freq;
    uint16_t vdd;

    const char *vsel_pin_name;
    const char *pwr_pin_name;

    bool enable_interrupt_driven;
    bool use_noncacheable_buf;
    bool require_cacheline_aligned_buf; /* set via BSP_SDXCx_REQUIRE_CACHELINE_ALIGNED_BUF */
    uint8_t *data_buf;
    uint32_t data_buf_size;
    uint8_t irq_priority;
    rt_event_t xfer_event;
};
/**
 * @brief SDIO CMD53 argument
 */
typedef union
{
    uint32_t value;
    struct
    {
        uint32_t count      : 9;
        uint32_t reg_addr   : 17;
        uint32_t op_code    : 1;
        uint32_t block_mode : 1;
        uint32_t func_num   : 3;
        uint32_t rw_flag    : 1;
    };
} sdio_cmd53_arg_t;
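
/*
 * Illustrative example (not part of the original driver): packing a CMD53
 * argument for a byte-mode read of 64 bytes from function 1 at register
 * address 0x100, with an incrementing address:
 *
 *   sdio_cmd53_arg_t arg = { .value = 0 };
 *   arg.rw_flag    = 0;      // 0 = read, 1 = write
 *   arg.func_num   = 1;      // I/O function number
 *   arg.block_mode = 0;      // 0 = byte mode, 1 = block mode
 *   arg.op_code    = 1;      // 1 = incrementing register address
 *   arg.reg_addr   = 0x100;  // 17-bit register address
 *   arg.count      = 64;     // byte count (block count in block mode)
 *   // arg.value now holds the raw 32-bit CMD53 argument
 */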
static void hpm_sdmmc_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req);
static void hpm_sdmmc_set_iocfg(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg);
static void hpm_sdmmc_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t en);
static void hpm_sdmmc_host_recovery(SDXC_Type *base);
static hpm_stat_t hpm_sdmmc_transfer_polling(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer);
static hpm_stat_t hpm_sdmmc_transfer_interrupt_driven(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer);
static hpm_stat_t hpm_sdmmc_transfer(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer);
static rt_int32_t hpm_sdmmc_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode);
static rt_int32_t hpm_sdmmc_switch_uhs_voltage(struct rt_mmcsd_host *host);
static void hpm_sdmmc_power_on_via_pin(struct hpm_mmcsd *mmcsd);
static void hpm_sdmmc_power_off_via_pin(struct hpm_mmcsd *mmcsd);
static void hpm_sdmmc_switch_to_3v3_via_pin(struct hpm_mmcsd *mmcsd);
static void hpm_sdmmc_switch_to_1v8_via_pin(struct hpm_mmcsd *mmcsd);
static void hpm_sdmmc_pin_init(const char *pin_name, bool is_output);
static void hpm_sdmmc_pin_write(const char *pin_name, rt_uint8_t value);
static void hpm_sdmmc_pin_init(const char *pin_name, bool is_output)
{
    if (pin_name == RT_NULL)
    {
        /* The PWR/VSEL pins are optional; skip silently if not configured */
        return;
    }
    rt_base_t pin = rt_pin_get(pin_name);
    if (pin < 0)
    {
        return;
    }
    rt_uint8_t mode = is_output ? PIN_MODE_OUTPUT : PIN_MODE_INPUT_PULLUP;
    rt_pin_mode(pin, mode);
}

static void hpm_sdmmc_pin_write(const char *pin_name, rt_uint8_t value)
{
    if (pin_name == RT_NULL)
    {
        return;
    }
    rt_base_t pin = rt_pin_get(pin_name);
    if (pin < 0)
    {
        return;
    }
    rt_pin_write(pin, value);
}

static void hpm_sdmmc_power_on_via_pin(struct hpm_mmcsd *mmcsd)
{
    hpm_sdmmc_pin_write(mmcsd->pwr_pin_name, 1);
}

static void hpm_sdmmc_power_off_via_pin(struct hpm_mmcsd *mmcsd)
{
    hpm_sdmmc_pin_write(mmcsd->pwr_pin_name, 0);
}

static void hpm_sdmmc_switch_to_3v3_via_pin(struct hpm_mmcsd *mmcsd)
{
    hpm_sdmmc_pin_write(mmcsd->vsel_pin_name, 0);
}

static void hpm_sdmmc_switch_to_1v8_via_pin(struct hpm_mmcsd *mmcsd)
{
    hpm_sdmmc_pin_write(mmcsd->vsel_pin_name, 1);
}
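
/*
 * UHS-I signal-voltage switch (SD Physical Layer spec, CMD11 handshake):
 * stop the card clock, wait for the card to drive DAT[3:0] low, switch the
 * regulator to 1.8V, restart the clock, then wait for the card to release
 * DAT[3:0] back high.
 */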
static rt_int32_t hpm_sdmmc_switch_uhs_voltage(struct rt_mmcsd_host *host)
{
    struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
    SDXC_Type *base = mmcsd->sdxc_base;

    /* 1. Stop providing clock to the card */
    sdxc_enable_inverse_clock(base, false);
    sdxc_enable_sd_clock(base, false);

    /* 2. Wait until DAT[3:0] are 4'b0000 */
    uint32_t data3_0_level;
    uint32_t delay_cnt = 1000000UL;
    do
    {
        data3_0_level = sdxc_get_data3_0_level(base);
        --delay_cnt;
    } while ((data3_0_level != 0U) && (delay_cnt > 0U));
    if (delay_cnt == 0U)
    {
        return -RT_ETIMEOUT;
    }

    /* 3. Switch to 1.8V */
    hpm_sdmmc_switch_to_1v8_via_pin(mmcsd);
    sdxc_select_voltage(base, sdxc_bus_voltage_sd_1v8);

    /* 4. Spec requires the host to wait 5ms; allow extra margin here */
    rt_thread_mdelay(10);

    /* 5. Provide the SD clock to the card again */
    sdxc_enable_inverse_clock(base, true);
    sdxc_enable_sd_clock(base, true);

    /* 6. Spec requires the host to wait 1ms; allow extra margin here */
    rt_thread_mdelay(5);

    /* 7. Check DAT[3:0], wait until the card releases the lines high (4'b1111) */
    delay_cnt = 1000000UL;
    do
    {
        data3_0_level = sdxc_get_data3_0_level(base);
        --delay_cnt;
    } while ((data3_0_level == 0U) && (delay_cnt > 0U));
    if (delay_cnt == 0U)
    {
        return -RT_ETIMEOUT;
    }

    return RT_EOK;
}
static const struct rt_mmcsd_host_ops hpm_mmcsd_host_ops =
{
    .request = hpm_sdmmc_request,
    .set_iocfg = hpm_sdmmc_set_iocfg,
    .get_card_status = NULL,
    .enable_sdio_irq = hpm_sdmmc_enable_sdio_irq,
    .execute_tuning = hpm_sdmmc_execute_tuning,
    .switch_uhs_voltage = hpm_sdmmc_switch_uhs_voltage,
};
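
/*
 * Shared IRQ handler body for all SDXC instances: forwards card interrupts
 * to the SDIO stack through the host's sdio_irq_sem, and, in
 * interrupt-driven mode, posts command-complete / transfer-complete / error
 * flags to the instance's xfer_event for the waiting transfer thread.
 */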
void hpm_sdmmc_isr(struct hpm_mmcsd *mmcsd)
{
    SDXC_Type *base = mmcsd->sdxc_base;
    uint32_t int_stat = sdxc_get_interrupt_status(base);
    uint32_t int_signal_en = sdxc_get_interrupt_signal(base);

    if (((int_stat & SDXC_INT_STAT_CARD_INTERRUPT_MASK) != 0) &&
        ((int_signal_en & SDXC_INT_STAT_CARD_INTERRUPT_MASK) != 0))
    {
        hpm_sdmmc_enable_sdio_irq(mmcsd->host, 0);
        rt_sem_release(mmcsd->host->sdio_irq_sem);
    }

    if (mmcsd->enable_interrupt_driven)
    {
        const uint32_t xfer_done_or_err_int_mask = SDXC_INT_STAT_CMD_COMPLETE_MASK
                                                 | SDXC_INT_STAT_XFER_COMPLETE_MASK
                                                 | SDXC_INT_STAT_ERR_INTERRUPT_MASK;
        if (((int_signal_en & xfer_done_or_err_int_mask) != 0U) && ((int_stat & xfer_done_or_err_int_mask) != 0U))
        {
            uint32_t event_flags = int_stat & xfer_done_or_err_int_mask;
            rt_event_send(mmcsd->xfer_event, event_flags);
            sdxc_clear_interrupt_status(base, event_flags);
        }
    }
}
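
/*
 * Per-instance configuration is driven by BSP_* macros from rtconfig.h
 * (generated by menuconfig). An illustrative selection for SDXC0 (example
 * values, not driver defaults):
 *
 *   #define BSP_USING_SDXC0
 *   #define BSP_SDXC0_BUS_WIDTH_4BIT
 *   #define BSP_SDXC0_VOLTAGE_3V3
 *   #define BSP_SDXC0_ENABLE_INTERRUPT_DRIVEN
 *   #define BSP_SDXC0_USE_NONCACHEABLE_BUFFER
 *   #define BSP_SDXC0_NONCACHEABLE_BUFFER_SIZE_IN_SECTOR 8
 */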
#if defined(BSP_USING_SDXC0)
void sdxc0_isr(void);

/* Place the ADMA table in a non-cacheable region */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(8) static uint32_t s_sdxc0_adma_table[SDXC_ADMA_TABLE_WORDS];

#if defined(BSP_SDXC0_USE_NONCACHEABLE_BUFFER)
#if defined(BSP_SDXC0_NONCACHEABLE_BUF_IN_FAST_RAM)
ATTR_PLACE_AT_FAST_RAM_WITH_ALIGNMENT(8)
#else
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(8)
#endif
static uint8_t s_sdxc0_noncacheable_buf[BSP_SDXC0_NONCACHEABLE_BUFFER_SIZE_IN_SECTOR * SDMMC_DEFAULT_SECTOR_SIZE];
#endif

/* SDXC0 */
static struct hpm_mmcsd s_hpm_sdxc0 =
{
    .name = "sd0",
    .sdxc_base = HPM_SDXC0,
    .sdxc_adma_table = s_sdxc0_adma_table,
    .irq_num = IRQn_SDXC0,
#if defined(BSP_SDXC0_BUS_WIDTH_8BIT)
    .support_8bit = true,
    .support_4bit = true,
#elif defined(BSP_SDXC0_BUS_WIDTH_4BIT)
    .support_4bit = true,
#elif defined(BSP_SDXC0_BUS_WIDTH_1BIT)
#else
    .support_4bit = true,
#endif
#if defined(BSP_SDXC0_VOLTAGE_3V3)
    .support_3v3 = true,
#endif
#if defined(BSP_SDXC0_VOLTAGE_1V8)
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC0_VOLTAGE_DUAL)
    .support_3v3 = true,
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC0_VSEL_PIN)
    .vsel_pin_name = BSP_SDXC0_VSEL_PIN,
#endif
#if defined(BSP_SDXC0_PWR_PIN)
    .pwr_pin_name = BSP_SDXC0_PWR_PIN,
#endif
#if defined(BSP_SDXC0_ENABLE_INTERRUPT_DRIVEN)
    .enable_interrupt_driven = true,
#endif
#if defined(BSP_SDXC0_REQUIRE_CACHELINE_ALIGNED_BUF)
    .require_cacheline_aligned_buf = true,
#endif
#if defined(BSP_SDXC0_IRQ_PRIORITY)
    .irq_priority = BSP_SDXC0_IRQ_PRIORITY,
#else
    .irq_priority = 1,
#endif
#if defined(BSP_SDXC0_USE_NONCACHEABLE_BUFFER)
    .use_noncacheable_buf = true,
    .data_buf = s_sdxc0_noncacheable_buf,
    .data_buf_size = sizeof(s_sdxc0_noncacheable_buf),
#endif
#if defined(BSP_SDXC0_USE_CACHEABLE_BUFFER)
    .data_buf_size = BSP_SDXC0_CACHEABLE_BUFFER_SIZE_IN_SECTOR * SDMMC_DEFAULT_SECTOR_SIZE,
#endif
};

SDK_DECLARE_EXT_ISR_M(IRQn_SDXC0, sdxc0_isr);
void sdxc0_isr(void)
{
    hpm_sdmmc_isr(&s_hpm_sdxc0);
}
#endif
#if defined(BSP_USING_SDXC1)
void sdxc1_isr(void);

/* Place the ADMA table in a non-cacheable region */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(8) static uint32_t s_sdxc1_adma_table[SDXC_ADMA_TABLE_WORDS];

#if defined(BSP_SDXC1_USE_NONCACHEABLE_BUFFER)
#if defined(BSP_SDXC1_NONCACHEABLE_BUF_IN_FAST_RAM)
ATTR_PLACE_AT_FAST_RAM_WITH_ALIGNMENT(8)
#else
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(8)
#endif
static uint8_t s_sdxc1_noncacheable_buf[BSP_SDXC1_NONCACHEABLE_BUFFER_SIZE_IN_SECTOR * SDMMC_DEFAULT_SECTOR_SIZE];
#endif

static struct hpm_mmcsd s_hpm_sdxc1 =
{
    .name = "sd1",
    .sdxc_base = HPM_SDXC1,
    .sdxc_adma_table = s_sdxc1_adma_table,
    .irq_num = IRQn_SDXC1,
#if defined(BSP_SDXC1_BUS_WIDTH_8BIT)
    .support_8bit = true,
    .support_4bit = true,
#elif defined(BSP_SDXC1_BUS_WIDTH_4BIT)
    .support_4bit = true,
#elif defined(BSP_SDXC1_BUS_WIDTH_1BIT)
#else
    .support_4bit = true,
#endif
#if defined(BSP_SDXC1_VOLTAGE_3V3)
    .support_3v3 = true,
#endif
#if defined(BSP_SDXC1_VOLTAGE_1V8)
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC1_VOLTAGE_DUAL)
    .support_3v3 = true,
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC1_VSEL_PIN)
    .vsel_pin_name = BSP_SDXC1_VSEL_PIN,
#endif
#if defined(BSP_SDXC1_PWR_PIN)
    .pwr_pin_name = BSP_SDXC1_PWR_PIN,
#endif
#if defined(BSP_SDXC1_ENABLE_INTERRUPT_DRIVEN)
    .enable_interrupt_driven = true,
#endif
#if defined(BSP_SDXC1_REQUIRE_CACHELINE_ALIGNED_BUF)
    .require_cacheline_aligned_buf = true,
#endif
#if defined(BSP_SDXC1_IRQ_PRIORITY)
    .irq_priority = BSP_SDXC1_IRQ_PRIORITY,
#else
    .irq_priority = 1,
#endif
#if defined(BSP_SDXC1_USE_NONCACHEABLE_BUFFER)
    .use_noncacheable_buf = true,
    .data_buf = s_sdxc1_noncacheable_buf,
    .data_buf_size = sizeof(s_sdxc1_noncacheable_buf),
#endif
#if defined(BSP_SDXC1_USE_CACHEABLE_BUFFER)
    .data_buf_size = BSP_SDXC1_CACHEABLE_BUFFER_SIZE_IN_SECTOR * SDMMC_DEFAULT_SECTOR_SIZE,
#endif
};

SDK_DECLARE_EXT_ISR_M(IRQn_SDXC1, sdxc1_isr);
void sdxc1_isr(void)
{
    hpm_sdmmc_isr(&s_hpm_sdxc1);
}
#endif
static struct hpm_mmcsd *hpm_sdxcs[] =
{
#if defined(BSP_USING_SDXC0)
    &s_hpm_sdxc0,
#endif
#if defined(BSP_USING_SDXC1)
    &s_hpm_sdxc1,
#endif
};
static rt_int32_t hpm_sdmmc_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode)
{
    RT_ASSERT(host != RT_NULL);
    RT_ASSERT(host->private_data != RT_NULL);
    struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
    SDXC_Type *base = mmcsd->sdxc_base;
    RT_ASSERT(base != RT_NULL);

    /* Prepare the auto-tuning environment */
    sdxc_stop_clock_during_phase_code_change(base, true);
    sdxc_set_post_change_delay(base, 3U);
    sdxc_select_cardclk_delay_source(base, false);
    sdxc_enable_power(base, true);
    hpm_stat_t err = sdxc_perform_auto_tuning(base, opcode);

    return (err != status_success) ? -RT_EPERM : RT_EOK;
}
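
/*
 * Busy-wait transfer path: issue the command (and the optional data phase
 * through the DMA engine), then poll the interrupt status register until
 * command/transfer completion is seen, an error is parsed, or roughly one
 * second (RT_TICK_PER_SECOND ticks) elapses.
 */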
static hpm_stat_t hpm_sdmmc_transfer_polling(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer)
{
    hpm_stat_t status;
    SDXC_Type *base = mmcsd->sdxc_base;
    sdxc_command_t *cmd = xfer->command;
    sdxc_data_t *data = xfer->data;
    volatile uint32_t interrupt_status;
    volatile rt_tick_t start_tick;
    rt_tick_t current_tick;
    bool need_chk_xfer_stat = false;

    sdxc_clear_interrupt_status(base, ~0UL);
    if (data == NULL)
    {
        (void) sdxc_send_command(base, cmd);
        /* Wait until idle */
        start_tick = rt_tick_get();
        do
        {
            interrupt_status = sdxc_get_interrupt_status(base);
            status = sdxc_parse_interrupt_status(base);
            HPM_BREAK_IF(status != status_success);
            current_tick = rt_tick_get();
            if ((current_tick - start_tick) > RT_TICK_PER_SECOND)
            {
                status = status_timeout;
                break;
            }
        } while (!IS_HPM_BITMASK_SET(interrupt_status, SDXC_INT_STAT_CMD_COMPLETE_MASK));

        if ((status == status_success) && (cmd->resp_type == sdxc_dev_resp_r1b))
        {
            need_chk_xfer_stat = true;
        }
    }
    else
    {
        status = sdxc_transfer_nonblocking(base, dma_config, xfer);
        if (status != status_success)
        {
            return status;
        }
        if (dma_config->dma_type == sdxc_dmasel_adma2)
        {
            /* Wait until idle */
            interrupt_status = sdxc_get_interrupt_status(base);
            start_tick = rt_tick_get();
            while (!IS_HPM_BITMASK_SET(interrupt_status, SDXC_INT_STAT_CMD_COMPLETE_MASK))
            {
                interrupt_status = sdxc_get_interrupt_status(base);
                status = sdxc_parse_interrupt_status(base);
                HPM_BREAK_IF(status != status_success);
                current_tick = rt_tick_get();
                if ((current_tick - start_tick) > RT_TICK_PER_SECOND)
                {
                    status = status_timeout;
                    break;
                }
            }
        }
        if (status == status_success)
        {
            need_chk_xfer_stat = true;
        }
    }

    if (need_chk_xfer_stat)
    {
        interrupt_status = sdxc_get_interrupt_status(base);
        start_tick = rt_tick_get();
        while (!IS_HPM_BITMASK_SET(interrupt_status, SDXC_INT_STAT_XFER_COMPLETE_MASK | SDXC_STS_ERROR))
        {
            interrupt_status = sdxc_get_interrupt_status(base);
            status = sdxc_parse_interrupt_status(base);
            HPM_BREAK_IF(status != status_success);
            current_tick = rt_tick_get();
            if ((current_tick - start_tick) > RT_TICK_PER_SECOND)
            {
                status = status_timeout;
                break;
            }
        }
    }

    if (status == status_success)
    {
        status = sdxc_receive_cmd_response(base, cmd);
    }

    return status;
}
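
/*
 * Event-driven transfer path: same sequencing as the polling variant, but
 * the calling thread blocks on xfer_event until hpm_sdmmc_isr() posts
 * completion/error flags or the one-second rt_event_recv() timeout expires.
 */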
static hpm_stat_t hpm_sdmmc_transfer_interrupt_driven(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer)
{
    hpm_stat_t status;
    SDXC_Type *base = mmcsd->sdxc_base;
    sdxc_command_t *cmd = xfer->command;
    sdxc_data_t *data = xfer->data;
    bool need_chk_xfer_stat = false;

    sdxc_clear_interrupt_status(base, ~0UL);
    if (data == NULL)
    {
        (void) sdxc_send_command(base, cmd);
        /* Wait until idle */
        const uint32_t wait_event_flags = SDXC_INT_STAT_CMD_COMPLETE_MASK
                                        | SDXC_INT_STAT_ERR_INTERRUPT_MASK;
        rt_err_t err = rt_event_recv(mmcsd->xfer_event, wait_event_flags,
                                     RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR, RT_TICK_PER_SECOND, NULL);
        if (err == RT_EOK)
        {
            status = sdxc_parse_interrupt_status(base);
        }
        else
        {
            status = status_timeout;
        }
        if ((status == status_success) && (cmd->resp_type == sdxc_dev_resp_r1b))
        {
            need_chk_xfer_stat = true;
        }
    }
    else
    {
        status = sdxc_transfer_nonblocking(base, dma_config, xfer);
        if (status != status_success)
        {
            return status;
        }
        if (dma_config->dma_type == sdxc_dmasel_adma2)
        {
            const uint32_t wait_event_flags = SDXC_INT_STAT_CMD_COMPLETE_MASK | SDXC_INT_STAT_ERR_INTERRUPT_MASK;
            rt_err_t err = rt_event_recv(mmcsd->xfer_event, wait_event_flags,
                                         RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR, RT_TICK_PER_SECOND, NULL);
            if (err == RT_EOK)
            {
                status = sdxc_parse_interrupt_status(base);
            }
            else
            {
                status = status_timeout;
            }
        }
        if (status == status_success)
        {
            need_chk_xfer_stat = true;
        }
    }

    if (need_chk_xfer_stat)
    {
        const uint32_t wait_event_flags = SDXC_INT_STAT_XFER_COMPLETE_MASK | SDXC_INT_STAT_ERR_INTERRUPT_MASK;
        rt_err_t err = rt_event_recv(mmcsd->xfer_event, wait_event_flags,
                                     RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR, RT_TICK_PER_SECOND, NULL);
        if (err == RT_EOK)
        {
            status = sdxc_parse_interrupt_status(base);
        }
        else
        {
            /* Treat a missed completion event as a timeout, matching the waits above */
            status = status_timeout;
        }
    }

    if (status == status_success)
    {
        status = sdxc_receive_cmd_response(base, cmd);
    }

    return status;
}
static hpm_stat_t hpm_sdmmc_transfer(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer)
{
    if (mmcsd->enable_interrupt_driven)
    {
        return hpm_sdmmc_transfer_interrupt_driven(mmcsd, dma_config, xfer);
    }
    else
    {
        return hpm_sdmmc_transfer_polling(mmcsd, dma_config, xfer);
    }
}

/**
 * @brief SDMMC request implementation based on the HPMicro SDXC host
 */
static void hpm_sdmmc_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
{
    RT_ASSERT(host != RT_NULL);
    RT_ASSERT(host->private_data != RT_NULL);
    RT_ASSERT(req != RT_NULL);
    RT_ASSERT(req->cmd != RT_NULL);

    sdxc_adma_config_t adma_config = { 0 };
    sdxc_xfer_t xfer = { 0 };
    sdxc_command_t sdxc_cmd = { 0 };
    sdxc_data_t sdxc_data = { 0 };
    bool need_copy_back = false;
    hpm_stat_t err = status_invalid_argument;
    struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
    struct rt_mmcsd_cmd *cmd = req->cmd;
    struct rt_mmcsd_data *data = cmd->data;

    /* Configure command */
    sdxc_cmd.cmd_index = cmd->cmd_code;
    sdxc_cmd.cmd_argument = cmd->arg;
    sdxc_cmd.cmd_type = (cmd->cmd_code == STOP_TRANSMISSION) ? sdxc_cmd_type_abort_cmd : sdxc_cmd_type_normal_cmd;

    switch (cmd->flags & RESP_MASK)
    {
    case RESP_NONE:
        sdxc_cmd.resp_type = sdxc_dev_resp_none;
        break;
    case RESP_R1:
        sdxc_cmd.resp_type = sdxc_dev_resp_r1;
        break;
    case RESP_R1B:
        sdxc_cmd.resp_type = sdxc_dev_resp_r1b;
        break;
    case RESP_R2:
        sdxc_cmd.resp_type = sdxc_dev_resp_r2;
        break;
    case RESP_R3:
        sdxc_cmd.resp_type = sdxc_dev_resp_r3;
        break;
    case RESP_R4:
        sdxc_cmd.resp_type = sdxc_dev_resp_r4;
        break;
    case RESP_R6:
        sdxc_cmd.resp_type = sdxc_dev_resp_r6;
        break;
    case RESP_R7:
        sdxc_cmd.resp_type = sdxc_dev_resp_r7;
        break;
    case RESP_R5:
        sdxc_cmd.resp_type = sdxc_dev_resp_r5;
        break;
    default:
        RT_ASSERT(NULL);
        break;
    }
    sdxc_cmd.cmd_flags = 0UL;
    xfer.command = &sdxc_cmd;
    xfer.data = NULL;

    if (data != NULL)
    {
        sdxc_data.enable_auto_cmd12 = false;
        sdxc_data.enable_auto_cmd23 = false;
        sdxc_data.enable_ignore_error = false;
        sdxc_data.block_size = data->blksize;
        sdxc_data.block_cnt = data->blks;

        /* Configure ADMA3 */
        adma_config.dma_type = sdxc_dmasel_adma3;
        adma_config.adma_table = (uint32_t *) core_local_mem_to_sys_address(BOARD_RUNNING_CORE,
                                                                            (uint32_t) mmcsd->sdxc_adma_table);
        adma_config.adma_table_words = SDXC_ADMA_TABLE_WORDS;

        size_t xfer_buf_addr = (uint32_t) data->buf;
        uint32_t xfer_len = data->blks * data->blksize;
        bool need_cache_maintenance = true;
        if ((data->flags & DATA_DIR_WRITE) != 0U)
        {
            uint32_t write_size = xfer_len;
            size_t aligned_start;
            uint32_t aligned_size;
            if ((xfer_buf_addr % CACHELINE_SIZE != 0) || (write_size % CACHELINE_SIZE != 0))
            {
                /* Unaligned write: bounce through data_buf, padding up to a full cache line */
                aligned_size = SDXC_CACHELINE_ALIGN_UP(write_size);
                rt_memcpy(mmcsd->data_buf, data->buf, xfer_len);
                rt_memset(&mmcsd->data_buf[write_size], 0, aligned_size - write_size);
                sdxc_data.tx_data = (uint32_t const *) core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t) mmcsd->data_buf);
                aligned_start = (uint32_t) sdxc_data.tx_data;
                need_cache_maintenance = !mmcsd->use_noncacheable_buf;
            }
            else
            {
                sdxc_data.tx_data = (uint32_t const *) core_local_mem_to_sys_address(BOARD_RUNNING_CORE, xfer_buf_addr);
                aligned_start = (uint32_t) sdxc_data.tx_data;
                aligned_size = write_size;
            }
            if (need_cache_maintenance)
            {
                l1c_dc_flush(aligned_start, aligned_size);
            }
            sdxc_data.rx_data = NULL;
        }
        else
        {
            uint32_t read_size = xfer_len;
            uint32_t aligned_read_size;
            if ((xfer_buf_addr % CACHELINE_SIZE != 0) || (read_size % CACHELINE_SIZE != 0))
            {
                /* Unaligned read: receive into data_buf, then copy back after the transfer */
                aligned_read_size = SDXC_CACHELINE_ALIGN_UP(read_size);
                sdxc_data.rx_data = (uint32_t *) core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t) mmcsd->data_buf);
                need_copy_back = true;
                need_cache_maintenance = !mmcsd->use_noncacheable_buf;
            }
            else
            {
                aligned_read_size = read_size;
                sdxc_data.rx_data = (uint32_t *) core_local_mem_to_sys_address(BOARD_RUNNING_CORE, xfer_buf_addr);
            }
            /* Invalidate the cache lines covering the receive buffer */
            if (need_cache_maintenance)
            {
                l1c_dc_invalidate((uint32_t) sdxc_data.rx_data, aligned_read_size);
            }
            sdxc_data.tx_data = RT_NULL;
        }
        xfer.data = &sdxc_data;

        /* Align the write/read size since the ADMA engine in the SDXC cannot transfer an unaligned size of data */
        if ((cmd->cmd_code == SD_IO_RW_EXTENDED) && (xfer_len % SDXC_ADMA_XFER_SIZE_ALIGNMENT != 0))
        {
            sdio_cmd53_arg_t cmd53_arg;
            cmd53_arg.value = sdxc_cmd.cmd_argument;
            cmd53_arg.count = HPM_ALIGN_UP(xfer_len, SDXC_ADMA_XFER_SIZE_ALIGNMENT);
            sdxc_cmd.cmd_argument = cmd53_arg.value;
            sdxc_data.block_size = HPM_ALIGN_UP(xfer_len, SDXC_ADMA_XFER_SIZE_ALIGNMENT);
        }
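        /*
         * For example (illustrative): a byte-mode CMD53 transfer of 13 bytes
         * is padded above to HPM_ALIGN_UP(13, 4) = 16, so the CMD53 count
         * field and the SDXC block size are both programmed as 16 to satisfy
         * the ADMA engine's 4-byte transfer-size granularity.
         */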
    }

    if ((data != NULL) && (data->blks > 1) && ((cmd->cmd_code == READ_MULTIPLE_BLOCK) || (cmd->cmd_code == WRITE_MULTIPLE_BLOCK)))
    {
        xfer.data->enable_auto_cmd12 = true;
    }

    err = hpm_sdmmc_transfer(mmcsd, &adma_config, &xfer);

    LOG_I("cmd=%d, arg=%x\n", cmd->cmd_code, cmd->arg);
    if (err != status_success)
    {
        hpm_sdmmc_host_recovery(mmcsd->sdxc_base);
        if (err != status_sdxc_cmd_timeout_error) /* Ignore command timeout error by default */
        {
            LOG_E(" ***hpm_sdmmc_transfer error: %d, cmd:%d, arg:0x%x*** -->\n", err, cmd->cmd_code, cmd->arg);
        }
        cmd->err = -RT_ERROR;
    }
    else
    {
        LOG_I(" ***hpm_sdmmc_transfer passed: %d*** -->\n", err);
        if (sdxc_cmd.resp_type == sdxc_dev_resp_r2)
        {
            LOG_I("resp:0x%08x 0x%08x 0x%08x 0x%08x\n", sdxc_cmd.response[0],
                  sdxc_cmd.response[1], sdxc_cmd.response[2], sdxc_cmd.response[3]);
        }
        else
        {
            LOG_I("resp:0x%08x\n", sdxc_cmd.response[0]);
        }
    }

    if ((sdxc_data.rx_data != NULL) && (cmd->err == RT_EOK) && need_copy_back)
    {
        uint32_t read_size = data->blks * data->blksize;
        rt_memcpy(data->buf, mmcsd->data_buf, read_size);
    }

    if ((cmd->flags & RESP_MASK) == RESP_R2)
    {
        cmd->resp[3] = sdxc_cmd.response[0];
        cmd->resp[2] = sdxc_cmd.response[1];
        cmd->resp[1] = sdxc_cmd.response[2];
        cmd->resp[0] = sdxc_cmd.response[3];
    }
    else
    {
        cmd->resp[0] = sdxc_cmd.response[0];
    }

    mmcsd_req_complete(host);
}
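
/*
 * Reprogram the card-clock delay chain for the current frequency; the SD
 * clock must be gated while the delay cells are updated, so it is stopped
 * and re-enabled around the change.
 */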
static void hpm_sdmmc_set_cardclk_delay_chain(struct hpm_mmcsd *mmcsd)
{
    SDXC_Type *base = mmcsd->sdxc_base;
    bool need_inverse = sdxc_is_inverse_clock_enabled(base);
    sdxc_enable_inverse_clock(base, false);
    sdxc_enable_sd_clock(base, false);
    uint32_t num_delaycells = sdxc_get_default_cardclk_delay_chain(base, mmcsd->freq);
    sdxc_set_cardclk_delay_chain(base, num_delaycells);
    sdxc_enable_inverse_clock(base, need_inverse);
    sdxc_enable_sd_clock(base, true);
}

ATTR_WEAK void init_sdxc_ds_pin(SDXC_Type *base)
{
    (void) base;
    LOG_W("Ignore this warning if the DS pin is not supported\n");
}
/**
 * @brief Set IO configuration for HPMicro IO and the SDXC host
 */
static void hpm_sdmmc_set_iocfg(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg)
{
    RT_ASSERT(host != RT_NULL);
    RT_ASSERT(host->private_data != RT_NULL);
    RT_ASSERT(io_cfg != RT_NULL);
    struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;

    /* Power control */
    uint32_t vdd = io_cfg->vdd;
    if (io_cfg->power_mode != mmcsd->power_mode)
    {
        switch (io_cfg->power_mode)
        {
        case MMCSD_POWER_OFF:
            hpm_sdmmc_power_off_via_pin(mmcsd);
            break;
        case MMCSD_POWER_ON:
            hpm_sdmmc_power_on_via_pin(mmcsd);
            break;
        case MMCSD_POWER_UP:
            hpm_sdmmc_power_off_via_pin(mmcsd);
            rt_thread_mdelay(10);
            hpm_sdmmc_power_on_via_pin(mmcsd);
            /* After power-up, wait 1ms, then wait 74 card clocks */
            rt_thread_mdelay(1);
            sdxc_wait_card_active(mmcsd->sdxc_base);
            break;
        default:
            /* Do nothing */
            break;
        }
        mmcsd->power_mode = io_cfg->power_mode;
    }

    /* Voltage switch */
    if (mmcsd->vdd != vdd)
    {
        if (vdd == 7)
        {
            /* Switch to 1.8V (OCR bit 7 = VDD 1.65-1.95V) */
            hpm_sdmmc_switch_to_1v8_via_pin(mmcsd);
        }
        else
        {
            /* Switch to 3.3V */
            hpm_sdmmc_switch_to_3v3_via_pin(mmcsd);
        }
        mmcsd->vdd = vdd;
    }

    /* Set bus width */
    if (mmcsd->bus_width != io_cfg->bus_width)
    {
        switch (io_cfg->bus_width)
        {
        case MMCSD_BUS_WIDTH_4:
            sdxc_set_data_bus_width(mmcsd->sdxc_base, sdxc_bus_width_4bit);
            break;
        case MMCSD_BUS_WIDTH_8:
            sdxc_set_data_bus_width(mmcsd->sdxc_base, sdxc_bus_width_8bit);
            break;
        default:
            sdxc_set_data_bus_width(mmcsd->sdxc_base, sdxc_bus_width_1bit);
            break;
        }
        mmcsd->bus_width = io_cfg->bus_width;
    }

    /* Set timing mode */
    bool need_config_ds = false;
    if (mmcsd->timing != io_cfg->timing)
    {
        switch (io_cfg->timing)
        {
        case MMCSD_TIMING_LEGACY:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_normal);
            break;
        case MMCSD_TIMING_SD_HS:
        case MMCSD_TIMING_MMC_HS:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_high);
            break;
        case MMCSD_TIMING_UHS_SDR12:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr12);
            break;
        case MMCSD_TIMING_UHS_SDR25:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr25);
            break;
        case MMCSD_TIMING_UHS_SDR50:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr50);
            break;
        case MMCSD_TIMING_UHS_SDR104:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr104);
            break;
        case MMCSD_TIMING_UHS_DDR50:
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_ddr50);
            /* Must switch to 1.8V signaling for UHS DDR50 */
            sdxc_select_voltage(mmcsd->sdxc_base, sdxc_bus_voltage_sd_1v8);
            break;
        case MMCSD_TIMING_MMC_DDR52:
            sdxc_enable_emmc_support(mmcsd->sdxc_base, true);
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_emmc_speed_high_speed_ddr);
            break;
        case MMCSD_TIMING_MMC_HS200:
            sdxc_enable_emmc_support(mmcsd->sdxc_base, true);
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_emmc_speed_hs200);
            break;
        case MMCSD_TIMING_MMC_HS400:
        case MMCSD_TIMING_MMC_HS400_ENH_DS:
            sdxc_enable_emmc_support(mmcsd->sdxc_base, true);
            sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_emmc_speed_hs400);
            if (io_cfg->timing == MMCSD_TIMING_MMC_HS400_ENH_DS)
            {
                sdxc_enable_enhanced_strobe(mmcsd->sdxc_base, true);
                uint32_t num_delaycells = sdxc_get_default_strobe_delay(mmcsd->sdxc_base);
                sdxc_set_data_strobe_delay(mmcsd->sdxc_base, num_delaycells);
            }
            need_config_ds = true;
            break;
        default:
            break;
        }
        mmcsd->timing = io_cfg->timing;
    }

    /* Initialize SDXC pins */
    bool open_drain = io_cfg->bus_mode == MMCSD_BUSMODE_OPENDRAIN;
    bool is_1v8 = (io_cfg->vdd == 7) || (mmcsd->host->valid_ocr == VDD_165_195);
    uint32_t width = (io_cfg->bus_width == MMCSD_BUS_WIDTH_8) ? 8 : ((io_cfg->bus_width == MMCSD_BUS_WIDTH_4) ? 4 : 1);
    init_sdxc_cmd_pin(mmcsd->sdxc_base, open_drain, is_1v8);
    init_sdxc_clk_data_pins(mmcsd->sdxc_base, width, is_1v8);
    rt_thread_mdelay(1);
    if (need_config_ds)
    {
        init_sdxc_ds_pin(mmcsd->sdxc_base);
        rt_thread_mdelay(1);
    }

    /* Initialize the SDXC clock */
    uint32_t sdxc_clock = io_cfg->clock;
    if (sdxc_clock != 0U)
    {
        if (mmcsd->freq != sdxc_clock)
        {
            bool need_reverse = true;
            bool need_card_delay_clk = false;
            if ((mmcsd->timing == MMCSD_TIMING_UHS_DDR50) ||
                (mmcsd->timing == MMCSD_TIMING_MMC_DDR52) ||
                (mmcsd->timing == MMCSD_TIMING_MMC_HS400) ||
                (mmcsd->timing == MMCSD_TIMING_MMC_HS400_ENH_DS))
            {
                need_reverse = false;
                need_card_delay_clk = true;
            }
            /* Ensure the frequency requested by the mmcsd stack doesn't exceed the maximum supported by the host */
            uint32_t clock_freq = MIN(mmcsd->host->freq_max, sdxc_clock);
            clock_freq = board_sd_configure_clock(mmcsd->sdxc_base, clock_freq, need_reverse);
            LOG_I("mmcsd clock: %dHz\n", clock_freq);
            mmcsd->freq = sdxc_clock;
            if (need_card_delay_clk)
            {
                hpm_sdmmc_set_cardclk_delay_chain(mmcsd);
            }
        }
    }
}
static void hpm_sdmmc_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t en)
{
    RT_ASSERT(host != RT_NULL);
    RT_ASSERT(host->private_data != RT_NULL);
    struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
    RT_ASSERT(mmcsd->sdxc_base != RT_NULL);
    SDXC_Type *base = mmcsd->sdxc_base;
    if (en != 0)
    {
        sdxc_enable_interrupt_signal(base, SDXC_INT_STAT_CARD_INTERRUPT_MASK, true);
        sdxc_enable_interrupt_status(base, SDXC_INT_STAT_CARD_INTERRUPT_MASK, true);
    }
    else
    {
        sdxc_enable_interrupt_status(base, SDXC_INT_STAT_CARD_INTERRUPT_MASK, false);
    }
}
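
/*
 * Best-effort error recovery: if the command or data line is stuck busy, or
 * the interrupt status reports command/data error bits, reset the affected
 * line(s) and clear all pending interrupt status before the next request.
 */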
static void hpm_sdmmc_host_recovery(SDXC_Type *base)
{
    uint32_t pstate = sdxc_get_present_status(base);
    bool need_reset_cmd_line = false;
    bool need_reset_data_line = false;

    if ((pstate & SDXC_PSTATE_CMD_INHIBIT_MASK) != 0U)
    {
        /* Command line is still busy: reset it */
        need_reset_cmd_line = true;
    }
    if ((pstate & SDXC_PSTATE_DAT_INHIBIT_MASK) != 0U)
    {
        /* Data line is still busy: reset it */
        need_reset_data_line = true;
    }

    uint32_t int_stat = sdxc_get_interrupt_status(base);
    if ((int_stat & 0xF0000UL) != 0U)
    {
        /* Command error status bits are set */
        need_reset_cmd_line = true;
    }
    if ((int_stat & 0x700000UL) != 0U)
    {
        /* Data error status bits are set */
        need_reset_data_line = true;
    }

    if (need_reset_cmd_line)
    {
        sdxc_reset(base, sdxc_reset_cmd_line, 0xFFFFUL);
    }
    if (need_reset_data_line)
    {
        sdxc_reset(base, sdxc_reset_data_line, 0xFFFFUL);
    }
    if (need_reset_cmd_line || need_reset_data_line)
    {
        sdxc_clear_interrupt_status(base, ~0UL);
    }
    rt_thread_mdelay(10);
}
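
/*
 * Device-init hook (INIT_DEVICE_EXPORT): for each enabled SDXC instance,
 * allocate an rt_mmcsd_host, derive the supported OCR mask and feature
 * flags from the BSP configuration, set up the bounce buffer, base clock,
 * PWR/VSEL pins, optional transfer event and IRQ, then hand the host to
 * the mmcsd stack via mmcsd_change().
 */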
int rt_hw_sdio_init(void)
{
    rt_err_t err = RT_EOK;
    struct rt_mmcsd_host *host = NULL;
    struct hpm_mmcsd *mmcsd = NULL;

    for (uint32_t i = 0; i < ARRAY_SIZE(hpm_sdxcs); i++)
    {
        host = mmcsd_alloc_host();
        if (host == NULL)
        {
            err = -RT_ERROR;
            break;
        }
        mmcsd = hpm_sdxcs[i];
        host->ops = &hpm_mmcsd_host_ops;
        host->freq_min = 375000;
        host->freq_max = 50000000;
        host->valid_ocr = 0;
        /* Determine the supported voltage range */
        if (mmcsd->support_3v3)
        {
            host->valid_ocr |= VDD_30_31 | VDD_31_32 | VDD_32_33 | VDD_33_34;
        }
        if (mmcsd->support_1v8)
        {
            host->valid_ocr |= VDD_165_195;
        }
        /* Determine the features supported by the host */
        host->flags = MMCSD_MUTBLKWRITE | MMCSD_SUP_HIGHSPEED | MMCSD_SUP_SDIO_IRQ;
        if (mmcsd->support_4bit)
        {
            host->flags |= MMCSD_BUSWIDTH_4;
        }
        if (mmcsd->support_8bit)
        {
            host->flags |= MMCSD_BUSWIDTH_8;
        }
        if (mmcsd->support_1v8)
        {
            host->freq_max = 166000000;
            host->flags |= MMCSD_SUP_HS200_1V8;
            host->flags |= MMCSD_SUP_SDR50 | MMCSD_SUP_SDR104;
            if (sdxc_is_ddr50_supported(mmcsd->sdxc_base))
            {
                host->flags |= MMCSD_SUP_DDR50;
            }
            if (mmcsd->support_8bit)
            {
                host->flags |= MMCSD_SUP_HS400_1V8 | MMCSD_SUP_ENH_DS;
            }
        }
        /* For eMMC devices, add High Speed DDR mode support as long as the host controller supports it */
        if (sdxc_is_ddr50_supported(mmcsd->sdxc_base))
        {
            host->flags |= MMCSD_SUP_HIGHSPEED_DDR;
        }
        rt_strncpy(host->name, mmcsd->name, RT_NAME_MAX);

        if (!mmcsd->use_noncacheable_buf)
        {
            mmcsd->data_buf = rt_malloc_align(mmcsd->data_buf_size, CACHELINE_SIZE);
        }
        RT_ASSERT(mmcsd->data_buf != RT_NULL);

        host->max_seg_size = mmcsd->data_buf_size;
        host->max_dma_segs = 1;
        host->max_blk_size = 512;
        host->max_blk_count = mmcsd->data_buf_size / SDMMC_DEFAULT_SECTOR_SIZE;
        mmcsd->host = host;

        /* Perform the necessary initialization */
        board_sd_configure_clock(mmcsd->sdxc_base, 375000, true);
        sdxc_config_t sdxc_config = { 0 };
        sdxc_config.data_timeout = SDXC_DATA_TIMEOUT;
        sdxc_init(mmcsd->sdxc_base, &sdxc_config);
        host->private_data = mmcsd;

        /* Initialize the PWR pin and the VSEL pin */
        if (mmcsd->pwr_pin_name != RT_NULL)
        {
            hpm_sdmmc_pin_init(mmcsd->pwr_pin_name, true);
            rt_thread_mdelay(1);
            if (host->valid_ocr == VDD_165_195)
            {
                hpm_sdmmc_switch_to_1v8_via_pin(mmcsd);
            }
            else
            {
                hpm_sdmmc_switch_to_3v3_via_pin(mmcsd);
            }
        }
        if (mmcsd->vsel_pin_name != RT_NULL)
        {
            hpm_sdmmc_pin_init(mmcsd->vsel_pin_name, true);
            rt_thread_mdelay(1);
        }

        if (mmcsd->enable_interrupt_driven)
        {
            char event_name[RT_NAME_MAX];
            snprintf(event_name, sizeof(event_name), "%s%s", mmcsd->name, "_evt");
            mmcsd->xfer_event = rt_event_create(event_name, RT_IPC_FLAG_FIFO);
            RT_ASSERT(mmcsd->xfer_event != RT_NULL);
            const uint32_t irq_mask = SDXC_INT_STAT_CMD_COMPLETE_MASK
                                    | SDXC_INT_STAT_XFER_COMPLETE_MASK
                                    | SDXC_INT_STAT_ERR_INTERRUPT_MASK;
            sdxc_enable_interrupt_signal(mmcsd->sdxc_base, irq_mask, true);
        }
        intc_m_enable_irq_with_priority(mmcsd->irq_num, mmcsd->irq_priority);
        mmcsd_change(host);
    }

    if (err != RT_EOK)
    {
        if (host != NULL)
        {
            mmcsd_free_host(host);
            host = NULL;
        }
    }
    return err;
}
INIT_DEVICE_EXPORT(rt_hw_sdio_init);
#endif