drv_sdio.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188
  1. /*
  2. * Copyright (c) 2022-2025 HPMicro
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-02-23 HPMicro First version
  9. * 2022-07-19 HPMicro Fixed the multi-block read/write issue
  10. * 2023-07-27 HPMicro Fixed clock setting issue
  11. * 2023-08-02 HPMicro Added speed mode setting
  12. * 2024-01-03 HPMicro Added multiple instance support
  13. * 2024-05-23 HPMicro Fixed unaligned transfer issue in the SDIO case
  14. * 2024-05-25 HPMicro Added HS200 & HS400 support, optimize the cache-management policy for read
  15. * 2024-05-26 HPMicro Added UHS-I support, added DDR50 and High Speed DDR mode support
  16. * 2024-06-19 HPMicro Added timeout check for SDXC transfer
  17. * 2025-03-06 HPMicro Adapt hpm-sdk v1.9.0
  18. * 2025-03-24 HPMicro Added ADMA3 support, added interrupt driven mode
  19. * 2025-04-11 HPMicro Added the non-cacheable buffer support, avoided dynamic memory allocation at transfer stage
  20. */
  21. #include <rtthread.h>
  22. #ifdef BSP_USING_SDXC
  23. #include <rtdbg.h>
  24. #include <rtdevice.h>
  25. #include "board.h"
  26. #include "hpm_sdxc_drv.h"
  27. #include "hpm_l1c_drv.h"
  28. #define CACHELINE_SIZE HPM_L1C_CACHELINE_SIZE
  29. #define SDXC_ADMA_TABLE_WORDS SDXC_AMDA3_DESC_MIN_WORDS
  30. #define SDXC_AMDA_ADDR_ALIGNMENT (4U)
  31. #define SDXC_ADMA_XFER_SIZE_ALIGNMENT (4U)
  32. #define SDXC_DATA_TIMEOUT (1000) /* 1000ms */
  33. #define SDMMC_DEFAULT_SECTOR_SIZE (512U)
  34. #define SDXC_CACHELINE_ALIGN_DOWN(x) HPM_L1C_CACHELINE_ALIGN_DOWN(x)
  35. #define SDXC_CACHELINE_ALIGN_UP(x) HPM_L1C_CACHELINE_ALIGN_UP(x)
  36. #define SDXC_IS_CACHELINE_ALIGNED(n) ((uint32_t)(n) % (uint32_t)(CACHELINE_SIZE) == 0U)
  37. struct hpm_mmcsd
  38. {
  39. struct rt_mmcsd_host *host;
  40. struct rt_mmcsd_req *req;
  41. struct rt_mmcsd_cmd *cmd;
  42. struct rt_timer *timer;
  43. char name[RT_NAME_MAX];
  44. rt_uint32_t *buf;
  45. SDXC_Type *sdxc_base;
  46. int32_t irq_num;
  47. uint32_t *sdxc_adma_table;
  48. bool support_8bit;
  49. bool support_4bit;
  50. bool support_1v8;
  51. bool support_3v3;
  52. uint8_t power_mode;
  53. uint8_t bus_width;
  54. uint8_t timing;
  55. uint8_t bus_mode;
  56. uint32_t freq;
  57. uint16_t vdd;
  58. const char *vsel_pin_name;
  59. const char *pwr_pin_name;
  60. bool enable_interrupt_driven;
  61. bool use_noncacheable_buf;
  62. uint8_t *data_buf;
  63. uint32_t data_buf_size;
  64. uint8_t irq_priority;
  65. rt_event_t xfer_event;
  66. };
/**
 * @brief SDIO CMD53 argument
 *
 * Bit layout of the IO_RW_EXTENDED (CMD53) argument, LSB first.
 * NOTE(review): bitfield ordering is compiler/ABI dependent; this layout
 * matches the toolchain used by the HPMicro SDK — do not reorder fields.
 */
typedef union
{
    uint32_t value;             /* Raw 32-bit argument */
    struct
    {
        uint32_t count :9;      /* Byte/block count */
        uint32_t reg_addr :17;  /* Register address inside the function */
        uint32_t op_code :1;    /* 0: fixed address, 1: incrementing address */
        uint32_t block_mode :1; /* 0: byte mode, 1: block mode */
        uint32_t func_num :3;   /* SDIO function number */
        uint32_t rw_flag :1;    /* 0: read, 1: write */
    };
} sdio_cmd53_arg_t;
/* ---- RT-Thread host-ops callbacks ---- */
static void hpm_sdmmc_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req);
static void hpm_sdmmc_set_iocfg(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg);
static void hpm_sdmmc_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t en);
static void hpm_sdmmc_host_recovery(SDXC_Type *base);
/* ---- Transfer engine (polling and interrupt-driven variants) ---- */
static hpm_stat_t hpm_sdmmc_transfer_polling(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer);
static hpm_stat_t hpm_sdmmc_transfer_interrupt_driven(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer);
static hpm_stat_t hpm_sdmmc_transfer(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer);
static rt_int32_t hpm_sdmmc_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode);
static rt_err_t hpm_sdmmc_signal_voltage_switch(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg);
/* ---- Board pin helpers (power and voltage-select pins are optional) ---- */
static void hpm_sdmmc_power_on_via_pin(struct hpm_mmcsd *mmcsd);
static void hpm_sdmmc_power_off_via_pin(struct hpm_mmcsd *mmcsd);
static void hpm_sdmmc_switch_to_3v3_via_pin(struct hpm_mmcsd *mmcsd);
static void hpm_sdmmc_switch_to_1v8_via_pin(struct hpm_mmcsd *mmcsd);
static void hpm_sdmmc_pin_init(const char *pin_name, bool is_output);
static void hpm_sdmmc_pin_write(const char *pin_name, rt_uint8_t value);
  98. static void hpm_sdmmc_pin_init(const char *pin_name, bool is_output)
  99. {
  100. rt_base_t pin = rt_pin_get(pin_name);
  101. if (pin < 0) {
  102. return;
  103. }
  104. rt_uint8_t mode = is_output ? PIN_MODE_OUTPUT : PIN_MODE_INPUT_PULLUP;
  105. rt_pin_mode(pin, mode);
  106. }
  107. static void hpm_sdmmc_pin_write(const char *pin_name, rt_uint8_t value)
  108. {
  109. rt_base_t pin = rt_pin_get(pin_name);
  110. if (pin < 0)
  111. {
  112. return;
  113. }
  114. rt_pin_write(pin, value);
  115. }
/* Enable card power: drive the optional power-control pin high. */
static void hpm_sdmmc_power_on_via_pin(struct hpm_mmcsd *mmcsd)
{
    hpm_sdmmc_pin_write(mmcsd->pwr_pin_name, 1);
}
/* Disable card power: drive the optional power-control pin low. */
static void hpm_sdmmc_power_off_via_pin(struct hpm_mmcsd *mmcsd)
{
    hpm_sdmmc_pin_write(mmcsd->pwr_pin_name, 0);
}
/* Select 3.3V signaling: drive the optional voltage-select pin low
 * (low = 3.3V on the supported boards — see the 1.8V counterpart below). */
static void hpm_sdmmc_switch_to_3v3_via_pin(struct hpm_mmcsd *mmcsd)
{
    hpm_sdmmc_pin_write(mmcsd->vsel_pin_name, 0);
}
/* Select 1.8V signaling: drive the optional voltage-select pin high. */
static void hpm_sdmmc_switch_to_1v8_via_pin(struct hpm_mmcsd *mmcsd)
{
    hpm_sdmmc_pin_write(mmcsd->vsel_pin_name, 1);
}
  132. static rt_err_t hpm_sdmmc_signal_voltage_switch(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg)
  133. {
  134. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  135. SDXC_Type *base = mmcsd->sdxc_base;
  136. /* 1. Stop providing clock to the card */
  137. sdxc_enable_inverse_clock(mmcsd->sdxc_base, false);
  138. sdxc_enable_sd_clock(mmcsd->sdxc_base, false);
  139. /* 2. Wait until DAT[3:0] are 4'b0000 */
  140. uint32_t data3_0_level;
  141. uint32_t delay_cnt = 1000000UL;
  142. do
  143. {
  144. data3_0_level = sdxc_get_data3_0_level(mmcsd->sdxc_base);
  145. --delay_cnt;
  146. } while ((data3_0_level != 0U) && (delay_cnt > 0U));
  147. if (delay_cnt < 1)
  148. {
  149. return -RT_ETIMEOUT;
  150. }
  151. /* 3. Switch to 1.8V/3.3V */
  152. if (ios->signal_voltage == MMCSD_SIGNAL_VOLTAGE_330)
  153. {
  154. hpm_sdmmc_switch_to_3v3_via_pin(mmcsd);
  155. sdxc_select_voltage(mmcsd->sdxc_base, sdxc_bus_voltage_sd_3v3);
  156. }
  157. else
  158. {
  159. hpm_sdmmc_switch_to_1v8_via_pin(mmcsd);
  160. sdxc_select_voltage(mmcsd->sdxc_base, sdxc_bus_voltage_sd_1v8);
  161. }
  162. /* 4. spec:host delay 5ms, host: give more delay time here */
  163. rt_thread_mdelay(10);
  164. /* 5. Provide SD clock the card again */
  165. sdxc_enable_inverse_clock(mmcsd->sdxc_base, true);
  166. sdxc_enable_sd_clock(mmcsd->sdxc_base, true);
  167. /* 6. spec: wait 1ms, host: give more delay time here */
  168. rt_thread_mdelay(5);
  169. /* 7. Check DAT[3:0], make sure the value is 4'b0000 */
  170. delay_cnt = 1000000UL;
  171. do
  172. {
  173. data3_0_level = sdxc_get_data3_0_level(mmcsd->sdxc_base);
  174. --delay_cnt;
  175. } while ((data3_0_level == 0U) && (delay_cnt > 0U));
  176. if (delay_cnt < 1)
  177. {
  178. return -RT_ETIMEOUT;
  179. }
  180. return RT_EOK;
  181. }
/* Callback table registered with the RT-Thread MMC/SD core.
 * get_card_status is intentionally NULL (not supported by this driver). */
static const struct rt_mmcsd_host_ops hpm_mmcsd_host_ops =
{
    .request = hpm_sdmmc_request,
    .set_iocfg = hpm_sdmmc_set_iocfg,
    .get_card_status = NULL,
    .enable_sdio_irq = hpm_sdmmc_enable_sdio_irq,
    .execute_tuning = hpm_sdmmc_execute_tuning,
    .signal_voltage_switch = hpm_sdmmc_signal_voltage_switch,
};
/**
 * @brief Shared SDXC interrupt service routine body.
 *
 * Handles two independent sources:
 *  - SDIO card interrupt: masks further card interrupts and wakes the
 *    SDIO IRQ thread via the host's semaphore.
 *  - Transfer completion/error (interrupt-driven mode only): forwards the
 *    relevant status bits to the transfer event and clears them.
 */
void hpm_sdmmc_isr(struct hpm_mmcsd *mmcsd)
{
    SDXC_Type *base = mmcsd->sdxc_base;
    uint32_t int_stat = sdxc_get_interrupt_status(base);
    uint32_t int_signal_en = sdxc_get_interrupt_signal(base);
    /* Card interrupt: only act when both flagged and enabled */
    if (((int_stat & SDXC_INT_STAT_CARD_INTERRUPT_MASK) != 0) &&
        ((int_signal_en & SDXC_INT_STAT_CARD_INTERRUPT_MASK) != 0))
    {
        /* Disable the card interrupt first so it does not re-fire before
         * the SDIO thread has serviced it */
        hpm_sdmmc_enable_sdio_irq(mmcsd->host, 0);
        rt_sem_release(mmcsd->host->sdio_irq_sem);
    }
    if (mmcsd->enable_interrupt_driven)
    {
        const uint32_t xfer_done_or_err_int_mask = SDXC_INT_STAT_CMD_COMPLETE_MASK \
                                                 | SDXC_INT_STAT_XFER_COMPLETE_MASK \
                                                 | SDXC_INT_STAT_ERR_INTERRUPT_MASK;
        if (((int_signal_en & xfer_done_or_err_int_mask) != 0U) && ((int_stat & xfer_done_or_err_int_mask) != 0U)) {
            uint32_t event_flags = int_stat & xfer_done_or_err_int_mask;
            /* Wake the waiting transfer, then acknowledge the bits so the
             * interrupt does not re-assert */
            rt_event_send(mmcsd->xfer_event, event_flags);
            sdxc_clear_interrupt_status(base, event_flags);
        }
    }
}
#if defined(BSP_USING_SDXC0)
void sdxc0_isr(void);
/* Place the ADMA table to non-cacheable region */
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(8) static uint32_t s_sdxc0_adma_table[SDXC_ADMA_TABLE_WORDS];
#if defined(BSP_SDXC0_USE_NONCACHEABLE_BUFFER)
/* Optional bounce buffer: fast RAM if requested, plain non-cacheable RAM otherwise */
#if defined(BSP_SDXC0_NONCACHEABLE_BUF_IN_FAST_RAM)
ATTR_PLACE_AT_FAST_RAM_WITH_ALIGNMENT(8)
#else
ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(8)
#endif
static uint8_t s_sdxc0_noncacheable_buf[BSP_SDXC0_NONCACHEABLE_BUFFER_SIZE_IN_SECTOR * SDMMC_DEFAULT_SECTOR_SIZE];
#endif
/* SDXC0 */
static struct hpm_mmcsd s_hpm_sdxc0 =
{
    .name = "sd0",
    .sdxc_base = HPM_SDXC0,
    .sdxc_adma_table = s_sdxc0_adma_table,
    .irq_num = IRQn_SDXC0,
#if defined(BSP_SDXC0_BUS_WIDTH_8BIT)
    .support_8bit = true,
    .support_4bit = true,
#elif defined(BSP_SDXC0_BUS_WIDTH_4BIT)
    .support_4bit = true,
#elif defined(BSP_SDXC0_BUS_WIDTH_1BIT)
#else
    /* Default to 4-bit support when no width option is selected */
    .support_4bit = true,
#endif
#if defined(BSP_SDXC0_VOLTAGE_3V3)
    .support_3v3 = true,
#endif
#if defined(BSP_SDXC0_VOLTAGE_1V8)
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC0_VOLTAGE_DUAL)
    .support_3v3 = true,
    .support_1v8 = true,
#endif
#if defined(BSP_SDXC0_VSEL_PIN)
    .vsel_pin_name = BSP_SDXC0_VSEL_PIN,
#endif
#if defined(BSP_SDXC0_PWR_PIN)
    .pwr_pin_name = BSP_SDXC0_PWR_PIN,
#endif
#if defined(BSP_SDXC0_ENABLE_INTERRUPT_DRIVEN)
    .enable_interrupt_driven = true,
#endif
#if defined(BSP_SDXC0_REQUIRE_CACHELINE_ALIGNED_BUF)
    /* NOTE(review): requires a matching 'require_cacheline_aligned_buf'
     * member in struct hpm_mmcsd — verify the struct declares it */
    .require_cacheline_aligned_buf = true,
#endif
#if defined(BSP_SDXC0_IRQ_PRIORITY)
    .irq_priority = BSP_SDXC0_IRQ_PRIORITY,
#else
    .irq_priority = 1,
#endif
#if defined (BSP_SDXC0_USE_NONCACHEABLE_BUFFER)
    .use_noncacheable_buf = true,
    .data_buf = s_sdxc0_noncacheable_buf,
    .data_buf_size = sizeof(s_sdxc0_noncacheable_buf),
#endif
#if defined(BSP_SDXC0_USE_CACHEABLE_BUFFER)
    /* Cacheable bounce buffer is allocated elsewhere; only the size is fixed here */
    .data_buf_size = BSP_SDXC0_CACHEABLE_BUFFER_SIZE_IN_SECTOR * SDMMC_DEFAULT_SECTOR_SIZE,
#endif
};
SDK_DECLARE_EXT_ISR_M(IRQn_SDXC0, sdxc0_isr);
void sdxc0_isr(void)
{
    hpm_sdmmc_isr(&s_hpm_sdxc0);
}
#endif
  284. #if defined(BSP_USING_SDXC1)
  285. void sdxc1_isr(void);
  286. /* Place the ADMA table to non-cacheable region */
  287. ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(8) static uint32_t s_sdxc1_adma_table[SDXC_ADMA_TABLE_WORDS];
  288. #if defined(BSP_SDXC1_NONCACHEABLE_BUFFER_SIZE_IN_SECTOR)
  289. #if defined(BSP_SDXC1_USE_NONCACHEABLE_BUFFER)
  290. ATTR_PLACE_AT_FAST_RAM_WITH_ALIGNMENT(8)
  291. #else
  292. ATTR_PLACE_AT_NONCACHEABLE_WITH_ALIGNMENT(8)
  293. #endif
  294. static uint8_t s_sdxc1_noncacheable_buf[BSP_SDXC1_NONCACHEABLE_BUFFER_SIZE_IN_SECTOR * SDMMC_DEFAULT_SECTOR_SIZE];
  295. #endif
  296. static struct hpm_mmcsd s_hpm_sdxc1 =
  297. {
  298. .name = "sd1",
  299. .sdxc_base = HPM_SDXC1,
  300. .sdxc_adma_table = s_sdxc1_adma_table,
  301. .irq_num = IRQn_SDXC1,
  302. #if defined(BSP_SDXC1_BUS_WIDTH_8BIT)
  303. .support_8bit = true,
  304. .support_4bit = true,
  305. #elif defined(BSP_SDXC1_BUS_WIDTH_4BIT)
  306. .support_4bit = true,
  307. #elif defined(BSP_SDXC1_BUS_WIDTH_1BIT)
  308. #else
  309. .support_4bit = true,
  310. #endif
  311. #if defined(BSP_SDXC1_VOLTAGE_3V3)
  312. .support_3v3 = true,
  313. #endif
  314. #if defined(BSP_SDXC1_VOLTAGE_1V8)
  315. .support_1v8 = true,
  316. #endif
  317. #if defined(BSP_SDXC1_VOLTAGE_DUAL)
  318. .support_3v3 = true,
  319. .support_1v8 = true,
  320. #endif
  321. #if defined(BSP_SDXC1_VSEL_PIN)
  322. .vsel_pin_name = BSP_SDXC1_VSEL_PIN,
  323. #endif
  324. #if defined(BSP_SDXC1_PWR_PIN)
  325. .pwr_pin_name = BSP_SDXC1_PWR_PIN,
  326. #endif
  327. #if defined(BSP_SDXC1_ENABLE_INTERRUPT_DRIVEN)
  328. .enable_interrupt_driven = true,
  329. #endif
  330. #if defined(BSP_SDXC1_REQUIRE_CACHELINE_ALIGNED_BUF)
  331. .require_cacheline_aligned_buf = true,
  332. #endif
  333. #if defined(BSP_SDXC1_IRQ_PRIORITY)
  334. .irq_priority = BSP_SDXC1_IRQ_PRIORITY,
  335. #else
  336. .irq_priority = 1,
  337. #endif
  338. #if defined (BSP_SDXC1_USE_NONCACHEABLE_BUFFER)
  339. .use_noncacheable_buf = true,
  340. .data_buf = s_sdxc1_noncacheable_buf,
  341. .data_buf_size = sizeof(s_sdxc1_noncacheable_buf),
  342. #endif
  343. #if defined(BSP_SDXC1_USE_CACHEABLE_BUFFER)
  344. .data_buf_size = BSP_SDXC1_CACHEABLE_BUFFER_SIZE_IN_SECTOR * SDMMC_DEFAULT_SECTOR_SIZE,
  345. #endif
  346. };
  347. SDK_DECLARE_EXT_ISR_M(IRQn_SDXC1, sdxc1_isr);
  348. void sdxc1_isr(void)
  349. {
  350. hpm_sdmmc_isr(&s_hpm_sdxc1);
  351. }
  352. #endif
/* All enabled SDXC instances, iterated by the driver init code. */
static struct hpm_mmcsd *hpm_sdxcs[] =
{
#if defined(BSP_USING_SDXC0)
    &s_hpm_sdxc0,
#endif
#if defined(BSP_USING_SDXC1)
    &s_hpm_sdxc1,
#endif
};
  362. static rt_int32_t hpm_sdmmc_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode)
  363. {
  364. RT_ASSERT(host != RT_NULL);
  365. RT_ASSERT(host->private_data != RT_NULL);
  366. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  367. SDXC_Type *base = mmcsd->sdxc_base;
  368. RT_ASSERT(base != RT_NULL);
  369. /* Prepare the Auto tuning environment */
  370. sdxc_stop_clock_during_phase_code_change(base, true);
  371. sdxc_set_post_change_delay(base, 3U);
  372. sdxc_select_cardclk_delay_source(base, false);
  373. sdxc_enable_power(base, true);
  374. hpm_stat_t err = sdxc_perform_auto_tuning(base, opcode);
  375. return (err != status_success) ? -RT_EPERM : RT_EOK;
  376. }
/**
 * @brief Execute one SDXC transfer, busy-polling the interrupt status.
 *
 * Command-only requests are sent directly and polled for CMD_COMPLETE;
 * data requests go through the non-blocking DMA transfer and are polled
 * for completion. Each wait loop is bounded by roughly one second
 * (RT_TICK_PER_SECOND) before giving up with status_timeout.
 *
 * @return status_success, status_timeout, or the error decoded from the
 *         controller's interrupt status
 */
static hpm_stat_t hpm_sdmmc_transfer_polling(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer)
{
    hpm_stat_t status;
    SDXC_Type *base = mmcsd->sdxc_base;
    sdxc_command_t *cmd = xfer->command;
    sdxc_data_t *data = xfer->data;
    volatile uint32_t interrupt_status;
    volatile rt_tick_t start_tick;
    rt_tick_t current_tick;
    bool need_chk_xfer_stat = false;
    /* Start from a clean slate: acknowledge any stale status bits */
    sdxc_clear_interrupt_status(base, ~0UL);
    if (data == NULL)
    {
        (void)sdxc_send_command(base, cmd);
        /* Wait until idle */
        start_tick = rt_tick_get();
        do
        {
            interrupt_status = sdxc_get_interrupt_status(base);
            status = sdxc_parse_interrupt_status(base);
            HPM_BREAK_IF(status != status_success);
            current_tick = rt_tick_get();
            if ((current_tick - start_tick) > RT_TICK_PER_SECOND) {
                status = status_timeout;
                break;
            }
        } while (!IS_HPM_BITMASK_SET(interrupt_status, SDXC_INT_STAT_CMD_COMPLETE_MASK));
        /* R1b responses keep DAT0 busy; wait for transfer-complete too */
        if ((status == status_success) && (cmd->resp_type == sdxc_dev_resp_r1b))
        {
            need_chk_xfer_stat = true;
        }
    }
    else
    {
        status = sdxc_transfer_nonblocking(base, dma_config, xfer);
        if (status != status_success)
        {
            return status;
        }
        if (dma_config->dma_type == sdxc_dmasel_adma2)
        {
            /* Wait until idle */
            interrupt_status = sdxc_get_interrupt_status(base);
            start_tick = rt_tick_get();
            while (!IS_HPM_BITMASK_SET(interrupt_status, SDXC_INT_STAT_CMD_COMPLETE_MASK))
            {
                interrupt_status = sdxc_get_interrupt_status(base);
                status = sdxc_parse_interrupt_status(base);
                HPM_BREAK_IF(status != status_success);
                current_tick = rt_tick_get();
                if ((current_tick - start_tick) > RT_TICK_PER_SECOND)
                {
                    status = status_timeout;
                    break;
                }
            }
        }
        if (status == status_success)
        {
            need_chk_xfer_stat = true;
        }
    }
    /* Wait for the data phase (or R1b busy) to finish */
    if (need_chk_xfer_stat)
    {
        interrupt_status = sdxc_get_interrupt_status(base);
        start_tick = rt_tick_get();
        while (!IS_HPM_BITMASK_SET(interrupt_status, SDXC_INT_STAT_XFER_COMPLETE_MASK | SDXC_STS_ERROR))
        {
            interrupt_status = sdxc_get_interrupt_status(base);
            status = sdxc_parse_interrupt_status(base);
            HPM_BREAK_IF(status != status_success);
            current_tick = rt_tick_get();
            if ((current_tick - start_tick) > RT_TICK_PER_SECOND)
            {
                status = status_timeout;
                break;
            }
        }
    }
    if (status == status_success)
    {
        status = sdxc_receive_cmd_response(base, cmd);
    }
    return status;
}
  462. static hpm_stat_t hpm_sdmmc_transfer_interrupt_driven(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer)
  463. {
  464. hpm_stat_t status;
  465. SDXC_Type *base = mmcsd->sdxc_base;
  466. sdxc_command_t *cmd = xfer->command;
  467. sdxc_data_t *data = xfer->data;
  468. bool need_chk_xfer_stat = false;
  469. sdxc_clear_interrupt_status(base, ~0UL);
  470. if (data == NULL)
  471. {
  472. (void)sdxc_send_command(base, cmd);
  473. /* Wait until idle */
  474. const uint32_t wait_event_flags = SDXC_INT_STAT_CMD_COMPLETE_MASK \
  475. | SDXC_INT_STAT_ERR_INTERRUPT_MASK;
  476. rt_err_t err = rt_event_recv(mmcsd->xfer_event, wait_event_flags, RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR, RT_TICK_PER_SECOND, NULL);
  477. if (err == RT_EOK)
  478. {
  479. status = sdxc_parse_interrupt_status(base);
  480. }
  481. else
  482. {
  483. status = status_timeout;
  484. }
  485. if ((status == status_success) && (cmd->resp_type == sdxc_dev_resp_r1b))
  486. {
  487. need_chk_xfer_stat = true;
  488. }
  489. }
  490. else
  491. {
  492. status = sdxc_transfer_nonblocking(base, dma_config, xfer);
  493. if (status != status_success)
  494. {
  495. return status;
  496. }
  497. if (dma_config->dma_type == sdxc_dmasel_adma2)
  498. {
  499. const uint32_t wait_event_flags = SDXC_INT_STAT_CMD_COMPLETE_MASK | SDXC_INT_STAT_ERR_INTERRUPT_MASK;
  500. rt_err_t err = rt_event_recv(mmcsd->xfer_event, wait_event_flags, RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR, RT_TICK_PER_SECOND, NULL);
  501. if (err == RT_EOK)
  502. {
  503. status = sdxc_parse_interrupt_status(base);
  504. }
  505. else
  506. {
  507. status = status_timeout;
  508. }
  509. }
  510. if (status == status_success)
  511. {
  512. need_chk_xfer_stat = true;
  513. }
  514. }
  515. if (need_chk_xfer_stat)
  516. {
  517. const uint32_t wait_event_flags = SDXC_INT_STAT_XFER_COMPLETE_MASK | SDXC_INT_STAT_ERR_INTERRUPT_MASK;
  518. rt_err_t err = rt_event_recv(mmcsd->xfer_event, wait_event_flags, RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR, RT_TICK_PER_SECOND, NULL);
  519. if (err == RT_EOK)
  520. {
  521. status = sdxc_parse_interrupt_status(base);
  522. }
  523. }
  524. if (status == status_success)
  525. {
  526. status = sdxc_receive_cmd_response(base, cmd);
  527. }
  528. return status;
  529. }
  530. static hpm_stat_t hpm_sdmmc_transfer(struct hpm_mmcsd *mmcsd, sdxc_adma_config_t *dma_config, sdxc_xfer_t *xfer)
  531. {
  532. if (mmcsd->enable_interrupt_driven)
  533. {
  534. return hpm_sdmmc_transfer_interrupt_driven(mmcsd, dma_config, xfer);
  535. }
  536. else
  537. {
  538. return hpm_sdmmc_transfer_polling(mmcsd, dma_config, xfer);
  539. }
  540. }
  541. /**
  542. * !@brief SDMMC request implementation based on HPMicro SDXC Host
  543. */
  544. static void hpm_sdmmc_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
  545. {
  546. RT_ASSERT(host != RT_NULL);
  547. RT_ASSERT(host->private_data != RT_NULL);
  548. RT_ASSERT(req != RT_NULL);
  549. RT_ASSERT(req->cmd != RT_NULL);
  550. sdxc_adma_config_t adma_config = { 0 };
  551. sdxc_xfer_t xfer = { 0 };
  552. sdxc_command_t sdxc_cmd = { 0 };
  553. sdxc_data_t sdxc_data = { 0 };
  554. bool need_copy_back = false;
  555. hpm_stat_t err = status_invalid_argument;
  556. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  557. struct rt_mmcsd_cmd *cmd = req->cmd;
  558. struct rt_mmcsd_data *data = cmd->data;
  559. /* configure command */
  560. sdxc_cmd.cmd_index = cmd->cmd_code;
  561. sdxc_cmd.cmd_argument = cmd->arg;
  562. sdxc_cmd.cmd_type = (cmd->cmd_code == STOP_TRANSMISSION) ? sdxc_cmd_type_abort_cmd : sdxc_cmd_type_normal_cmd;
  563. switch (cmd->flags & RESP_MASK)
  564. {
  565. case RESP_NONE:
  566. sdxc_cmd.resp_type = sdxc_dev_resp_none;
  567. break;
  568. case RESP_R1:
  569. sdxc_cmd.resp_type = sdxc_dev_resp_r1;
  570. break;
  571. case RESP_R1B:
  572. sdxc_cmd.resp_type = sdxc_dev_resp_r1b;
  573. break;
  574. case RESP_R2:
  575. sdxc_cmd.resp_type = sdxc_dev_resp_r2;
  576. break;
  577. case RESP_R3:
  578. sdxc_cmd.resp_type = sdxc_dev_resp_r3;
  579. break;
  580. case RESP_R4:
  581. sdxc_cmd.resp_type = sdxc_dev_resp_r4;
  582. break;
  583. case RESP_R6:
  584. sdxc_cmd.resp_type = sdxc_dev_resp_r6;
  585. break;
  586. case RESP_R7:
  587. sdxc_cmd.resp_type = sdxc_dev_resp_r7;
  588. break;
  589. case RESP_R5:
  590. sdxc_cmd.resp_type = sdxc_dev_resp_r5;
  591. break;
  592. default:
  593. RT_ASSERT(NULL);
  594. break;
  595. }
  596. sdxc_cmd.cmd_flags = 0UL;
  597. xfer.command = &sdxc_cmd;
  598. xfer.data = NULL;
  599. if (data != NULL)
  600. {
  601. sdxc_data.enable_auto_cmd12 = false;
  602. sdxc_data.enable_auto_cmd23 = false;
  603. sdxc_data.enable_ignore_error = false;
  604. sdxc_data.block_size = data->blksize;
  605. sdxc_data.block_cnt = data->blks;
  606. /* configure adma3 */
  607. adma_config.dma_type = sdxc_dmasel_adma3;
  608. adma_config.adma_table = (uint32_t*) core_local_mem_to_sys_address(BOARD_RUNNING_CORE,
  609. (uint32_t) mmcsd->sdxc_adma_table);
  610. adma_config.adma_table_words = SDXC_ADMA_TABLE_WORDS;
  611. size_t xfer_buf_addr = (uint32_t)data->buf;
  612. uint32_t xfer_len = data->blks * data->blksize;
  613. bool need_cache_maintenance = true;
  614. if ((req->data->flags & DATA_DIR_WRITE) != 0U)
  615. {
  616. uint32_t write_size = xfer_len;
  617. size_t aligned_start;
  618. uint32_t aligned_size;
  619. if ((xfer_buf_addr % CACHELINE_SIZE != 0) || (write_size % CACHELINE_SIZE != 0))
  620. {
  621. uint32_t write_size = xfer_len;
  622. aligned_size = SDXC_CACHELINE_ALIGN_UP(write_size);
  623. rt_memcpy(mmcsd->data_buf, data->buf, xfer_len);
  624. rt_memset(&mmcsd->data_buf[write_size], 0, aligned_size - write_size);
  625. sdxc_data.tx_data = (uint32_t const *)core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t)mmcsd->data_buf);
  626. aligned_start = (uint32_t)sdxc_data.tx_data;
  627. need_cache_maintenance = !mmcsd->use_noncacheable_buf;
  628. }
  629. else
  630. {
  631. sdxc_data.tx_data = (uint32_t const *)core_local_mem_to_sys_address(BOARD_RUNNING_CORE, xfer_buf_addr);
  632. aligned_start = (uint32_t)sdxc_data.tx_data;
  633. aligned_size = write_size;
  634. }
  635. if (need_cache_maintenance)
  636. {
  637. l1c_dc_flush(aligned_start, aligned_size);
  638. }
  639. sdxc_data.rx_data = NULL;
  640. }
  641. else
  642. {
  643. uint32_t read_size = xfer_len;
  644. uint32_t aligned_read_size;
  645. if ((xfer_buf_addr % CACHELINE_SIZE != 0) || (read_size % CACHELINE_SIZE != 0))
  646. {
  647. aligned_read_size = SDXC_CACHELINE_ALIGN_UP(read_size);
  648. sdxc_data.rx_data = (uint32_t *)core_local_mem_to_sys_address(BOARD_RUNNING_CORE, (uint32_t)mmcsd->data_buf);
  649. need_copy_back = true;
  650. need_cache_maintenance = !mmcsd->use_noncacheable_buf;
  651. }
  652. else
  653. {
  654. aligned_read_size = read_size;
  655. sdxc_data.rx_data = (uint32_t *)core_local_mem_to_sys_address(BOARD_RUNNING_CORE, xfer_buf_addr);
  656. }
  657. /* Invalidate cache-line for the new allocated buffer */
  658. if (need_cache_maintenance)
  659. {
  660. l1c_dc_invalidate((uint32_t)sdxc_data.rx_data, aligned_read_size);
  661. }
  662. sdxc_data.tx_data = RT_NULL;
  663. }
  664. xfer.data = &sdxc_data;
  665. /* Align the write/read size since the ADMA engine in the SDXC cannot transfer unaligned size of data */
  666. if ((cmd->cmd_code == SD_IO_RW_EXTENDED) && (xfer_len % SDXC_ADMA_XFER_SIZE_ALIGNMENT != 0))
  667. {
  668. sdio_cmd53_arg_t cmd53_arg;
  669. cmd53_arg.value = sdxc_cmd.cmd_argument;
  670. cmd53_arg.count = HPM_ALIGN_UP(xfer_len, SDXC_ADMA_XFER_SIZE_ALIGNMENT);
  671. sdxc_cmd.cmd_argument = cmd53_arg.value;
  672. sdxc_data.block_size = HPM_ALIGN_UP(xfer_len, SDXC_ADMA_XFER_SIZE_ALIGNMENT);
  673. }
  674. }
  675. if ((req->data->blks > 1) && ((cmd->cmd_code == READ_MULTIPLE_BLOCK) || ((cmd->cmd_code == WRITE_MULTIPLE_BLOCK))))
  676. {
  677. xfer.data->enable_auto_cmd12 = true;
  678. }
  679. err = hpm_sdmmc_transfer(mmcsd, &adma_config, &xfer);
  680. LOG_I("cmd=%d, arg=%x\n", cmd->cmd_code, cmd->arg);
  681. if (err != status_success)
  682. {
  683. hpm_sdmmc_host_recovery(mmcsd->sdxc_base);
  684. if (err != status_sdxc_cmd_timeout_error) /* Ignore command timeout error by default */
  685. {
  686. LOG_E(" ***hpm_sdmmc_transfer error: %d, cmd:%d, arg:0x%x*** -->\n", err, cmd->cmd_code, cmd->arg);
  687. }
  688. cmd->err = -RT_ERROR;
  689. }
  690. else
  691. {
  692. LOG_I(" ***hpm_sdmmc_transfer passed: %d*** -->\n", err);
  693. if (sdxc_cmd.resp_type == sdxc_dev_resp_r2)
  694. {
  695. LOG_I("resp:0x%08x 0x%08x 0x%08x 0x%08x\n", sdxc_cmd.response[0],
  696. sdxc_cmd.response[1],
  697. sdxc_cmd.response[2],
  698. sdxc_cmd.response[3]);
  699. }
  700. else
  701. {
  702. LOG_I("resp:0x%08x\n", sdxc_cmd.response[0]);
  703. }
  704. }
  705. if ((sdxc_data.rx_data != NULL) && (cmd->err == RT_EOK))
  706. {
  707. uint32_t read_size = data->blks * data->blksize;
  708. if (need_copy_back)
  709. {
  710. rt_memcpy(data->buf, mmcsd->data_buf, read_size);
  711. }
  712. }
  713. if ((cmd->flags & RESP_MASK) == RESP_R2)
  714. {
  715. cmd->resp[3] = sdxc_cmd.response[0];
  716. cmd->resp[2] = sdxc_cmd.response[1];
  717. cmd->resp[1] = sdxc_cmd.response[2];
  718. cmd->resp[0] = sdxc_cmd.response[3];
  719. }
  720. else
  721. {
  722. cmd->resp[0] = sdxc_cmd.response[0];
  723. }
  724. mmcsd_req_complete(host);
  725. }
  726. static void hpm_sdmmc_set_cardclk_delay_chain(struct hpm_mmcsd *mmcsd)
  727. {
  728. SDXC_Type *base = mmcsd->sdxc_base;
  729. bool need_inverse = sdxc_is_inverse_clock_enabled(base);
  730. sdxc_enable_inverse_clock(base, false);
  731. sdxc_enable_sd_clock(base, false);
  732. uint32_t num_delaycells = sdxc_get_default_cardclk_delay_chain(base, mmcsd->freq);
  733. sdxc_set_cardclk_delay_chain(base, num_delaycells);
  734. sdxc_enable_inverse_clock(base, need_inverse);
  735. sdxc_enable_sd_clock(base, true);
  736. }
  737. ATTR_WEAK void init_sdxc_ds_pin(SDXC_Type *base)
  738. {
  739. LOG_W("Ignore this warning if the DS pin is not supported\n");
  740. }
  741. /**
  742. * !@brief Set IO Configuration for HPMicro IO and SDXC Host
  743. */
  744. static void hpm_sdmmc_set_iocfg(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg)
  745. {
  746. RT_ASSERT(host != RT_NULL);
  747. RT_ASSERT(host->private_data != RT_NULL);
  748. RT_ASSERT(io_cfg != RT_NULL);
  749. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  750. /* Power control */
  751. uint32_t vdd = io_cfg->vdd;
  752. if (io_cfg->power_mode != mmcsd->power_mode)
  753. {
  754. switch(io_cfg->power_mode)
  755. {
  756. case MMCSD_POWER_OFF:
  757. hpm_sdmmc_power_off_via_pin(mmcsd);
  758. break;
  759. case MMCSD_POWER_ON:
  760. hpm_sdmmc_power_on_via_pin(mmcsd);
  761. break;
  762. case MMCSD_POWER_UP:
  763. hpm_sdmmc_power_off_via_pin(mmcsd);
  764. rt_thread_mdelay(10);
  765. hpm_sdmmc_power_on_via_pin(mmcsd);
  766. /* After power up, wait 1ms, then wait 74 card clock */
  767. rt_thread_mdelay(1);
  768. sdxc_wait_card_active(mmcsd->sdxc_base);
  769. break;
  770. default:
  771. /* Do nothing */
  772. break;
  773. }
  774. mmcsd->power_mode = io_cfg->power_mode;
  775. }
  776. /* Voltage switch */
  777. if (mmcsd->vdd != vdd)
  778. {
  779. if (vdd == 7)
  780. {
  781. /* Switch to 1.8V */
  782. hpm_sdmmc_switch_to_1v8_via_pin(mmcsd);
  783. }
  784. else
  785. {
  786. /* Switch to 3V */
  787. hpm_sdmmc_switch_to_3v3_via_pin(mmcsd);
  788. }
  789. mmcsd->vdd = vdd;
  790. }
  791. /* Set bus width */
  792. if (mmcsd->bus_width != io_cfg->bus_width)
  793. {
  794. switch (io_cfg->bus_width)
  795. {
  796. case MMCSD_BUS_WIDTH_4:
  797. sdxc_set_data_bus_width(mmcsd->sdxc_base, sdxc_bus_width_4bit);
  798. break;
  799. case MMCSD_BUS_WIDTH_8:
  800. sdxc_set_data_bus_width(mmcsd->sdxc_base, sdxc_bus_width_8bit);
  801. break;
  802. default:
  803. sdxc_set_data_bus_width(mmcsd->sdxc_base, sdxc_bus_width_1bit);
  804. break;
  805. }
  806. mmcsd->bus_width = io_cfg->bus_width;
  807. }
  808. /* Set timing mode */
  809. bool need_config_ds = false;
  810. if (mmcsd->timing != io_cfg->timing)
  811. {
  812. switch (io_cfg->timing)
  813. {
  814. case MMCSD_TIMING_LEGACY:
  815. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_normal);
  816. break;
  817. case MMCSD_TIMING_SD_HS:
  818. case MMCSD_TIMING_MMC_HS:
  819. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_high);
  820. break;
  821. case MMCSD_TIMING_UHS_SDR12:
  822. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr12);
  823. break;
  824. case MMCSD_TIMING_UHS_SDR25:
  825. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr25);
  826. break;
  827. case MMCSD_TIMING_UHS_SDR50:
  828. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr50);
  829. break;
  830. case MMCSD_TIMING_UHS_SDR104:
  831. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_sdr104);
  832. break;
  833. case MMCSD_TIMING_UHS_DDR50:
  834. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_sd_speed_ddr50);
  835. /* Must switch to 1.8V signaling for UHS_DDR50 */
  836. sdxc_select_voltage(mmcsd->sdxc_base, sdxc_bus_voltage_sd_1v8);
  837. break;
  838. case MMCSD_TIMING_MMC_DDR52:
  839. sdxc_enable_emmc_support(mmcsd->sdxc_base, true);
  840. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_emmc_speed_high_speed_ddr);
  841. break;
  842. case MMCSD_TIMING_MMC_HS200:
  843. sdxc_enable_emmc_support(mmcsd->sdxc_base, true);
  844. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_emmc_speed_hs200);
  845. break;
  846. case MMCSD_TIMING_MMC_HS400:
  847. case MMCSD_TIMING_MMC_HS400_ENH_DS:
  848. sdxc_enable_emmc_support(mmcsd->sdxc_base, true);
  849. sdxc_set_speed_mode(mmcsd->sdxc_base, sdxc_emmc_speed_hs400);
  850. if (io_cfg->timing == MMCSD_TIMING_MMC_HS400_ENH_DS)
  851. {
  852. sdxc_enable_enhanced_strobe(mmcsd->sdxc_base, true);
  853. uint32_t num_delaycells = sdxc_get_default_strobe_delay(mmcsd->sdxc_base);
  854. sdxc_set_data_strobe_delay(mmcsd->sdxc_base, num_delaycells);
  855. }
  856. need_config_ds = true;
  857. break;
  858. }
  859. mmcsd->timing = io_cfg->timing;
  860. }
  861. /* Initialize SDXC Pins */
  862. bool open_drain = io_cfg->bus_mode == MMCSD_BUSMODE_OPENDRAIN;
  863. bool is_1v8 = (io_cfg->vdd == 7) || (mmcsd->host->valid_ocr == VDD_165_195);
  864. uint32_t width = (io_cfg->bus_width == MMCSD_BUS_WIDTH_8) ? 8 : ((io_cfg->bus_width == MMCSD_BUS_WIDTH_4) ? 4 : 1);
  865. init_sdxc_cmd_pin(mmcsd->sdxc_base, open_drain, is_1v8);
  866. init_sdxc_clk_data_pins(mmcsd->sdxc_base, width, is_1v8);
  867. rt_thread_mdelay(1);
  868. if (need_config_ds)
  869. {
  870. init_sdxc_ds_pin(mmcsd->sdxc_base);
  871. rt_thread_mdelay(1);
  872. }
  873. /* Initialize SDXC clock */
  874. uint32_t sdxc_clock = io_cfg->clock;
  875. if (sdxc_clock != 0U)
  876. {
  877. if (mmcsd->freq != sdxc_clock)
  878. {
  879. bool need_reverse = true;
  880. bool need_card_delay_clk = false;
  881. if ((mmcsd->timing == MMCSD_TIMING_UHS_DDR50) ||
  882. (mmcsd->timing == MMCSD_TIMING_MMC_DDR52) ||
  883. (mmcsd->timing == MMCSD_TIMING_MMC_HS400) ||
  884. (mmcsd->timing == MMCSD_TIMING_MMC_HS400_ENH_DS))
  885. {
  886. need_reverse = false;
  887. need_card_delay_clk = true;
  888. }
  889. /* Ensure request frequency from mmcsd stack level doesn't exceed maximum supported frequency by host */
  890. uint32_t clock_freq = MIN(mmcsd->host->freq_max, sdxc_clock);
  891. clock_freq = board_sd_configure_clock(mmcsd->sdxc_base, clock_freq, need_reverse);
  892. LOG_I("mmcsd clock: %dHz\n", clock_freq);
  893. mmcsd->freq = sdxc_clock;
  894. if (need_card_delay_clk)
  895. {
  896. hpm_sdmmc_set_cardclk_delay_chain(mmcsd);
  897. }
  898. }
  899. }
  900. }
  901. static void hpm_sdmmc_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t en)
  902. {
  903. RT_ASSERT(host != RT_NULL);
  904. RT_ASSERT(host->private_data != RT_NULL);
  905. struct hpm_mmcsd *mmcsd = (struct hpm_mmcsd *) host->private_data;
  906. RT_ASSERT(mmcsd->sdxc_base != RT_NULL);
  907. SDXC_Type *base = mmcsd->sdxc_base;
  908. if (en != 0)
  909. {
  910. sdxc_enable_interrupt_signal(base, SDXC_INT_STAT_CARD_INTERRUPT_MASK, true);
  911. sdxc_enable_interrupt_status(base, SDXC_INT_STAT_CARD_INTERRUPT_MASK, true);
  912. }
  913. else
  914. {
  915. sdxc_enable_interrupt_status(base, SDXC_INT_STAT_CARD_INTERRUPT_MASK, false);
  916. }
  917. }
  918. static void hpm_sdmmc_host_recovery(SDXC_Type *base)
  919. {
  920. uint32_t pstate = sdxc_get_present_status(base);
  921. bool need_reset_cmd_line = false;
  922. bool need_reset_data_line = false;
  923. if ((pstate & SDXC_PSTATE_CMD_INHIBIT_MASK) != 0U)
  924. {
  925. /* Reset command line */
  926. need_reset_cmd_line = true;
  927. }
  928. if ((pstate & SDXC_PSTATE_DAT_INHIBIT_MASK) != 0U)
  929. {
  930. /* Reset data line */
  931. need_reset_data_line = true;
  932. }
  933. uint32_t int_stat = sdxc_get_interrupt_status(base);
  934. if ((int_stat & 0xF0000UL) != 0U)
  935. {
  936. need_reset_cmd_line = true;
  937. }
  938. if ((int_stat & 0x700000) != 0U)
  939. {
  940. need_reset_data_line = true;
  941. }
  942. if (need_reset_cmd_line)
  943. {
  944. sdxc_reset(base, sdxc_reset_cmd_line, 0xFFFFUL);
  945. }
  946. if (need_reset_data_line)
  947. {
  948. sdxc_reset(base, sdxc_reset_data_line, 0xFFFFUL);
  949. }
  950. if (need_reset_cmd_line || need_reset_data_line)
  951. {
  952. sdxc_clear_interrupt_status(base, ~0UL);
  953. }
  954. rt_thread_mdelay(10);
  955. }
  956. int rt_hw_sdio_init(void)
  957. {
  958. rt_err_t err = RT_EOK;
  959. struct rt_mmcsd_host *host = NULL;
  960. struct hpm_mmcsd *mmcsd = NULL;
  961. for (uint32_t i = 0; i < ARRAY_SIZE(hpm_sdxcs); i++)
  962. {
  963. host = mmcsd_alloc_host();
  964. if (host == NULL)
  965. {
  966. err = -RT_ERROR;
  967. break;
  968. }
  969. mmcsd = hpm_sdxcs[i];
  970. host->ops = &hpm_mmcsd_host_ops;
  971. host->freq_min = 375000;
  972. host->freq_max = 50000000;
  973. host->valid_ocr = 0;
  974. /* Determine supported Voltage range */
  975. if (mmcsd->support_3v3)
  976. {
  977. host->valid_ocr |= VDD_30_31 | VDD_31_32 | VDD_32_33 | VDD_33_34;
  978. }
  979. if (mmcsd->support_1v8)
  980. {
  981. host->valid_ocr |= VDD_165_195;
  982. }
  983. /* Determine Host supported features */
  984. host->flags = MMCSD_MUTBLKWRITE | MMCSD_SUP_HIGHSPEED | MMCSD_SUP_SDIO_IRQ;
  985. if (mmcsd->support_4bit)
  986. {
  987. host->flags |= MMCSD_BUSWIDTH_4;
  988. }
  989. if (mmcsd->support_8bit) {
  990. host->flags |= MMCSD_BUSWIDTH_8;
  991. }
  992. if (mmcsd->support_1v8)
  993. {
  994. host->freq_max = 166000000;
  995. host->flags |= MMCSD_SUP_HS200_1V8;
  996. host->flags |= MMCSD_SUP_SDR50 | MMCSD_SUP_SDR104;
  997. if (sdxc_is_ddr50_supported(mmcsd->sdxc_base))
  998. {
  999. host->flags |= MMCSD_SUP_DDR50;
  1000. }
  1001. if (mmcsd->support_8bit)
  1002. {
  1003. host->flags |= MMCSD_SUP_HS400_1V8 | MMCSD_SUP_ENH_DS;
  1004. }
  1005. }
  1006. /* For eMMC device, add High Speed DDR mode support as long as it is supported by the host controller */
  1007. if (sdxc_is_ddr50_supported(mmcsd->sdxc_base))
  1008. {
  1009. host->flags |= MMCSD_SUP_HIGHSPEED_DDR;
  1010. }
  1011. rt_strncpy(host->name, mmcsd->name, RT_NAME_MAX);
  1012. if (!mmcsd->use_noncacheable_buf)
  1013. {
  1014. mmcsd->data_buf = rt_malloc_align(mmcsd->data_buf_size, CACHELINE_SIZE);
  1015. }
  1016. RT_ASSERT(mmcsd->data_buf != RT_NULL);
  1017. host->max_seg_size = mmcsd->data_buf_size;
  1018. host->max_dma_segs = 1;
  1019. host->max_blk_size = 512;
  1020. host->max_blk_count = mmcsd->data_buf_size / SDMMC_DEFAULT_SECTOR_SIZE;
  1021. mmcsd->host = host;
  1022. /* Perform necessary initialization */
  1023. board_sd_configure_clock(mmcsd->sdxc_base, 375000, true);
  1024. sdxc_config_t sdxc_config = { 0 };
  1025. sdxc_config.data_timeout = SDXC_DATA_TIMEOUT;
  1026. sdxc_init(mmcsd->sdxc_base, &sdxc_config);
  1027. host->private_data = mmcsd;
  1028. /* Initialize PWR pin and VSEL pin */
  1029. if (mmcsd->pwr_pin_name != RT_NULL)
  1030. {
  1031. hpm_sdmmc_pin_init(mmcsd->pwr_pin_name, true);
  1032. rt_thread_mdelay(1);
  1033. if (host->valid_ocr == VDD_165_195)
  1034. {
  1035. hpm_sdmmc_switch_to_1v8_via_pin(mmcsd);
  1036. }
  1037. else
  1038. {
  1039. hpm_sdmmc_switch_to_3v3_via_pin(mmcsd);
  1040. }
  1041. }
  1042. if (mmcsd->vsel_pin_name != RT_NULL)
  1043. {
  1044. hpm_sdmmc_pin_init(mmcsd->vsel_pin_name, true);
  1045. rt_thread_mdelay(1);
  1046. }
  1047. if (mmcsd->enable_interrupt_driven)
  1048. {
  1049. char event_name[RT_NAME_MAX];
  1050. snprintf(event_name, sizeof(event_name), "%s%s", mmcsd->name, "_evt");
  1051. mmcsd->xfer_event = rt_event_create(event_name, RT_IPC_FLAG_FIFO);
  1052. RT_ASSERT(mmcsd->xfer_event != RT_NULL);
  1053. const uint32_t irq_mask = SDXC_INT_STAT_CMD_COMPLETE_MASK \
  1054. | SDXC_INT_STAT_XFER_COMPLETE_MASK \
  1055. | SDXC_INT_STAT_ERR_INTERRUPT_MASK;
  1056. sdxc_enable_interrupt_signal(mmcsd->sdxc_base, irq_mask, true);
  1057. }
  1058. intc_m_enable_irq_with_priority(mmcsd->irq_num, mmcsd->irq_priority);
  1059. mmcsd_change(host);
  1060. }
  1061. if (err != RT_EOK)
  1062. {
  1063. if (host != NULL)
  1064. {
  1065. mmcsd_free_host(host);
  1066. host = NULL;
  1067. }
  1068. }
  1069. return err;
  1070. }
  1071. INIT_DEVICE_EXPORT(rt_hw_sdio_init);
  1072. #endif