drv_spi.c 54 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509
  1. /*
  2. * Copyright (c) 2021-2025 HPMicro
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-02-01 HPMicro First version
  9. * 2023-02-15 HPMicro Add DMA support
  10. * 2023-07-14 HPMicro Manage the DMA buffer alignment in driver
  11. * 2023-12-14 HPMicro change state blocking wait to interrupt semaphore wait for DMA
  12. * 2024-06-10 HPMicro Add the SPI pin settings
  13. * 2025-03-17 HPMicro Improve SPI driver,support SPI/DSPI/QSPI
  14. * 2025-07-14 HPMicro Check CS pin in xfer API
  15. * 2025-08-05 HPMicro Optimized cache alignment handling for DMA transfers
  16. */
  17. #include <rtthread.h>
  18. #ifdef BSP_USING_SPI
  19. #include <rtdevice.h>
  20. #include "board.h"
  21. #include "drv_spi.h"
  22. #include "hpm_spi_drv.h"
  23. #include "hpm_sysctl_drv.h"
  24. #include "hpm_dma_mgr.h"
  25. #include "hpm_dmamux_drv.h"
  26. #include "hpm_l1c_drv.h"
  27. #include "hpm_clock_drv.h"
  28. #define DBG_TAG "drv.spi"
  29. #define DBG_LVL DBG_INFO
  30. #include <rtdbg.h>
  31. #if defined(BSP_USING_SPI0)
  32. #ifndef BSP_SPI0_USING_QUAD_IO
  33. #ifndef BSP_SPI0_USING_DUAL_IO
  34. #ifndef BSP_SPI0_USING_SINGLE_IO
  35. #define BSP_SPI0_USING_SINGLE_IO
  36. #endif
  37. #endif
  38. #endif
  39. #endif
  40. #if defined(BSP_USING_SPI1)
  41. #ifndef BSP_SPI1_USING_QUAD_IO
  42. #ifndef BSP_SPI1_USING_DUAL_IO
  43. #ifndef BSP_SPI1_USING_SINGLE_IO
  44. #define BSP_SPI1_USING_SINGLE_IO
  45. #endif
  46. #endif
  47. #endif
  48. #endif
  49. #if defined(BSP_USING_SPI2)
  50. #ifndef BSP_SPI2_USING_QUAD_IO
  51. #ifndef BSP_SPI2_USING_DUAL_IO
  52. #ifndef BSP_SPI2_USING_SINGLE_IO
  53. #define BSP_SPI2_USING_SINGLE_IO
  54. #endif
  55. #endif
  56. #endif
  57. #endif
  58. #if defined(BSP_USING_SPI3)
  59. #ifndef BSP_SPI3_USING_QUAD_IO
  60. #ifndef BSP_SPI3_USING_DUAL_IO
  61. #ifndef BSP_SPI3_USING_SINGLE_IO
  62. #define BSP_SPI3_USING_SINGLE_IO
  63. #endif
  64. #endif
  65. #endif
  66. #endif
  67. #if defined(BSP_USING_SPI4)
  68. #ifndef BSP_SPI4_USING_QUAD_IO
  69. #ifndef BSP_SPI4_USING_DUAL_IO
  70. #ifndef BSP_SPI4_USING_SINGLE_IO
  71. #define BSP_SPI4_USING_SINGLE_IO
  72. #endif
  73. #endif
  74. #endif
  75. #endif
  76. #if defined(BSP_USING_SPI5)
  77. #ifndef BSP_SPI5_USING_QUAD_IO
  78. #ifndef BSP_SPI5_USING_DUAL_IO
  79. #ifndef BSP_SPI5_USING_SINGLE_IO
  80. #define BSP_SPI5_USING_SINGLE_IO
  81. #endif
  82. #endif
  83. #endif
  84. #endif
  85. #if defined(BSP_USING_SPI6)
  86. #ifndef BSP_SPI6_USING_QUAD_IO
  87. #ifndef BSP_SPI6_USING_DUAL_IO
  88. #ifndef BSP_SPI6_USING_SINGLE_IO
  89. #define BSP_SPI6_USING_SINGLE_IO
  90. #endif
  91. #endif
  92. #endif
  93. #endif
  94. #if defined(BSP_USING_SPI7)
  95. #ifndef BSP_SPI7_USING_QUAD_IO
  96. #ifndef BSP_SPI7_USING_DUAL_IO
  97. #ifndef BSP_SPI7_USING_SINGLE_IO
  98. #define BSP_SPI7_USING_SINGLE_IO
  99. #endif
  100. #endif
  101. #endif
  102. #endif
/*
 * Per-controller driver context: hardware handles, DMA resources and the
 * synchronization objects used to wait for interrupt / DMA completion.
 */
struct hpm_spi
{
    uint32_t instance;                          /* controller index (not referenced in this chunk - TODO confirm use) */
    char *bus_name;                             /* RT-Thread bus name: "spiN" / "dspiN" / "qspiN" by IO width */
    SPI_Type *spi_base;                         /* SPI peripheral register base */
    clock_name_t clk_name;                      /* clock identifier of this SPI instance */
    spi_data_phase_format_t spi_io_mode;        /* data phase width: single/dual/quad IO */
    spi_control_config_t control_config;        /* cached SDK control configuration, set up in hpm_spi_configure() */
    struct rt_spi_bus spi_bus;                  /* RT-Thread SPI bus object for this instance */
    rt_sem_t xfer_sem;                          /* transfer-level semaphore (usage outside this chunk) */
    rt_bool_t enable_dma;                       /* RT_TRUE when BSP_SPIx_USING_DMA is configured */
    rt_uint8_t tx_dmamux;                       /* DMAMUX request source for TX */
    rt_uint8_t rx_dmamux;                       /* DMAMUX request source for RX */
    dma_resource_t tx_dma;                      /* allocated TX DMA channel resource */
    dma_resource_t rx_dma;                      /* allocated RX DMA channel resource */
    rt_uint8_t spi_irq;                         /* SPI interrupt number */
    rt_uint8_t spi_irq_priority;                /* SPI interrupt priority (defaults to 1) */
    rt_sem_t spi_xfer_done_sem;                 /* released by the SPI end-of-transfer interrupt */
    rt_sem_t txdma_xfer_done_sem;               /* released by the TX DMA transfer-complete callback */
    rt_sem_t rxdma_xfer_done_sem;               /* released by the RX DMA transfer-complete callback */
    void (*spi_pins_init)(SPI_Type *spi_base);  /* board pin-mux init: GPIO-as-CS or hardware-CS variant */
};
/*
 * Bounce-buffer bookkeeping for DMA transfers whose user buffers are not
 * cache-line aligned: raw allocation pointers (kept for freeing) plus their
 * cache-aligned views handed to the DMA engine.
 */
typedef struct {
    rt_uint8_t *raw_alloc_tx_buf;   /* original TX allocation, needed to free the buffer */
    rt_uint8_t *raw_alloc_rx_buf;   /* original RX allocation, needed to free the buffer */
    rt_uint8_t *aligned_tx_buf;     /* cache-aligned alias inside raw_alloc_tx_buf */
    rt_uint8_t *aligned_rx_buf;     /* cache-aligned alias inside raw_alloc_rx_buf */
    rt_uint32_t aligned_size;       /* usable size of the aligned region, in bytes */
} spi_dma_buf_ctx_t;
  132. static rt_err_t hpm_spi_configure(struct rt_spi_device *device, struct rt_spi_configuration *cfg);
  133. static rt_ssize_t hpm_spi_xfer(struct rt_spi_device *device, struct rt_spi_message *msg);
/*
 * Static controller table: one entry per enabled BSP_USING_SPIx instance.
 * The bus name and IO mode follow the Kconfig-selected data-phase width
 * ("spiN" = single, "dspiN" = dual, "qspiN" = quad IO); the pin-init hook
 * selects GPIO-as-CS unless BSP_SPIx_USING_HARD_CS is set.
 */
static struct hpm_spi hpm_spis[] =
{
#if defined(BSP_USING_SPI0)
    {
#if defined(BSP_SPI0_USING_SINGLE_IO)
        .bus_name = "spi0",
        .spi_io_mode = spi_single_io_mode,
#endif
#if defined(BSP_SPI0_USING_DUAL_IO)
        .bus_name = "dspi0",
        .spi_io_mode = spi_dual_io_mode,
#endif
#if defined(BSP_SPI0_USING_QUAD_IO)
        .bus_name = "qspi0",
        .spi_io_mode = spi_quad_io_mode,
#endif
        .spi_base = HPM_SPI0,
        .clk_name = clock_spi0,
#if defined(BSP_SPI0_USING_DMA)
        .enable_dma = RT_TRUE,
#endif
        .tx_dmamux = HPM_DMA_SRC_SPI0_TX,
        .rx_dmamux = HPM_DMA_SRC_SPI0_RX,
        .spi_irq = IRQn_SPI0,
#if defined(BSP_SPI0_IRQ_PRIORITY)
        .spi_irq_priority = BSP_SPI0_IRQ_PRIORITY,
#else
        .spi_irq_priority = 1,
#endif
#if !defined BSP_SPI0_USING_HARD_CS
        .spi_pins_init = init_spi_pins_with_gpio_as_cs,
#else
        .spi_pins_init = init_spi_pins,
#endif
    },
#endif
#if defined(BSP_USING_SPI1)
    {
#if defined(BSP_SPI1_USING_SINGLE_IO)
        .bus_name = "spi1",
        .spi_io_mode = spi_single_io_mode,
#endif
#if defined(BSP_SPI1_USING_DUAL_IO)
        .bus_name = "dspi1",
        .spi_io_mode = spi_dual_io_mode,
#endif
#if defined(BSP_SPI1_USING_QUAD_IO)
        .bus_name = "qspi1",
        .spi_io_mode = spi_quad_io_mode,
#endif
        .spi_base = HPM_SPI1,
        .clk_name = clock_spi1,
#if defined(BSP_SPI1_USING_DMA)
        .enable_dma = RT_TRUE,
#endif
        .tx_dmamux = HPM_DMA_SRC_SPI1_TX,
        .rx_dmamux = HPM_DMA_SRC_SPI1_RX,
        .spi_irq = IRQn_SPI1,
#if defined(BSP_SPI1_IRQ_PRIORITY)
        .spi_irq_priority = BSP_SPI1_IRQ_PRIORITY,
#else
        .spi_irq_priority = 1,
#endif
#if !defined BSP_SPI1_USING_HARD_CS
        .spi_pins_init = init_spi_pins_with_gpio_as_cs,
#else
        .spi_pins_init = init_spi_pins,
#endif
    },
#endif
#if defined(BSP_USING_SPI2)
    {
#if defined(BSP_SPI2_USING_SINGLE_IO)
        .bus_name = "spi2",
        .spi_io_mode = spi_single_io_mode,
#endif
#if defined(BSP_SPI2_USING_DUAL_IO)
        .bus_name = "dspi2",
        .spi_io_mode = spi_dual_io_mode,
#endif
#if defined(BSP_SPI2_USING_QUAD_IO)
        .bus_name = "qspi2",
        .spi_io_mode = spi_quad_io_mode,
#endif
        .spi_base = HPM_SPI2,
        .clk_name = clock_spi2,
#if defined(BSP_SPI2_USING_DMA)
        .enable_dma = RT_TRUE,
#endif
        .tx_dmamux = HPM_DMA_SRC_SPI2_TX,
        .rx_dmamux = HPM_DMA_SRC_SPI2_RX,
        .spi_irq = IRQn_SPI2,
#if defined(BSP_SPI2_IRQ_PRIORITY)
        .spi_irq_priority = BSP_SPI2_IRQ_PRIORITY,
#else
        .spi_irq_priority = 1,
#endif
#if !defined BSP_SPI2_USING_HARD_CS
        .spi_pins_init = init_spi_pins_with_gpio_as_cs,
#else
        .spi_pins_init = init_spi_pins,
#endif
    },
#endif
#if defined(BSP_USING_SPI3)
    {
#if defined(BSP_SPI3_USING_SINGLE_IO)
        .bus_name = "spi3",
        .spi_io_mode = spi_single_io_mode,
#endif
#if defined(BSP_SPI3_USING_DUAL_IO)
        .bus_name = "dspi3",
        .spi_io_mode = spi_dual_io_mode,
#endif
#if defined(BSP_SPI3_USING_QUAD_IO)
        .bus_name = "qspi3",
        .spi_io_mode = spi_quad_io_mode,
#endif
        .spi_base = HPM_SPI3,
        .clk_name = clock_spi3,
#if defined(BSP_SPI3_USING_DMA)
        .enable_dma = RT_TRUE,
#endif
        .tx_dmamux = HPM_DMA_SRC_SPI3_TX,
        .rx_dmamux = HPM_DMA_SRC_SPI3_RX,
        .spi_irq = IRQn_SPI3,
#if defined(BSP_SPI3_IRQ_PRIORITY)
        .spi_irq_priority = BSP_SPI3_IRQ_PRIORITY,
#else
        .spi_irq_priority = 1,
#endif
#if !defined BSP_SPI3_USING_HARD_CS
        .spi_pins_init = init_spi_pins_with_gpio_as_cs,
#else
        .spi_pins_init = init_spi_pins,
#endif
    },
#endif
#if defined(BSP_USING_SPI4)
    {
#if defined(BSP_SPI4_USING_SINGLE_IO)
        .bus_name = "spi4",
        .spi_io_mode = spi_single_io_mode,
#endif
#if defined(BSP_SPI4_USING_DUAL_IO)
        .bus_name = "dspi4",
        .spi_io_mode = spi_dual_io_mode,
#endif
#if defined(BSP_SPI4_USING_QUAD_IO)
        .bus_name = "qspi4",
        .spi_io_mode = spi_quad_io_mode,
#endif
        .spi_base = HPM_SPI4,
        .clk_name = clock_spi4,
#if defined(BSP_SPI4_USING_DMA)
        .enable_dma = RT_TRUE,
#endif
        .tx_dmamux = HPM_DMA_SRC_SPI4_TX,
        .rx_dmamux = HPM_DMA_SRC_SPI4_RX,
        .spi_irq = IRQn_SPI4,
#if defined(BSP_SPI4_IRQ_PRIORITY)
        .spi_irq_priority = BSP_SPI4_IRQ_PRIORITY,
#else
        .spi_irq_priority = 1,
#endif
#if !defined BSP_SPI4_USING_HARD_CS
        .spi_pins_init = init_spi_pins_with_gpio_as_cs,
#else
        .spi_pins_init = init_spi_pins,
#endif
    },
#endif
#if defined(BSP_USING_SPI5)
    {
#if defined(BSP_SPI5_USING_SINGLE_IO)
        .bus_name = "spi5",
        .spi_io_mode = spi_single_io_mode,
#endif
#if defined(BSP_SPI5_USING_DUAL_IO)
        .bus_name = "dspi5",
        .spi_io_mode = spi_dual_io_mode,
#endif
#if defined(BSP_SPI5_USING_QUAD_IO)
        .bus_name = "qspi5",
        .spi_io_mode = spi_quad_io_mode,
#endif
        .spi_base = HPM_SPI5,
        .clk_name = clock_spi5,
#if defined(BSP_SPI5_USING_DMA)
        .enable_dma = RT_TRUE,
#endif
        .tx_dmamux = HPM_DMA_SRC_SPI5_TX,
        .rx_dmamux = HPM_DMA_SRC_SPI5_RX,
        .spi_irq = IRQn_SPI5,
#if defined(BSP_SPI5_IRQ_PRIORITY)
        .spi_irq_priority = BSP_SPI5_IRQ_PRIORITY,
#else
        .spi_irq_priority = 1,
#endif
#if !defined BSP_SPI5_USING_HARD_CS
        .spi_pins_init = init_spi_pins_with_gpio_as_cs,
#else
        .spi_pins_init = init_spi_pins,
#endif
    },
#endif
#if defined(BSP_USING_SPI6)
    {
#if defined(BSP_SPI6_USING_SINGLE_IO)
        .bus_name = "spi6",
        .spi_io_mode = spi_single_io_mode,
#endif
#if defined(BSP_SPI6_USING_DUAL_IO)
        .bus_name = "dspi6",
        .spi_io_mode = spi_dual_io_mode,
#endif
#if defined(BSP_SPI6_USING_QUAD_IO)
        .bus_name = "qspi6",
        .spi_io_mode = spi_quad_io_mode,
#endif
        .spi_base = HPM_SPI6,
        .clk_name = clock_spi6,
#if defined(BSP_SPI6_USING_DMA)
        .enable_dma = RT_TRUE,
#endif
        .tx_dmamux = HPM_DMA_SRC_SPI6_TX,
        .rx_dmamux = HPM_DMA_SRC_SPI6_RX,
        .spi_irq = IRQn_SPI6,
#if defined(BSP_SPI6_IRQ_PRIORITY)
        .spi_irq_priority = BSP_SPI6_IRQ_PRIORITY,
#else
        .spi_irq_priority = 1,
#endif
#if !defined BSP_SPI6_USING_HARD_CS
        .spi_pins_init = init_spi_pins_with_gpio_as_cs,
#else
        .spi_pins_init = init_spi_pins,
#endif
    },
#endif
#if defined(BSP_USING_SPI7)
    {
#if defined(BSP_SPI7_USING_SINGLE_IO)
        .bus_name = "spi7",
        .spi_io_mode = spi_single_io_mode,
#endif
#if defined(BSP_SPI7_USING_DUAL_IO)
        .bus_name = "dspi7",
        .spi_io_mode = spi_dual_io_mode,
#endif
#if defined(BSP_SPI7_USING_QUAD_IO)
        .bus_name = "qspi7",
        .spi_io_mode = spi_quad_io_mode,
#endif
        .spi_base = HPM_SPI7,
        .clk_name = clock_spi7,
#if defined(BSP_SPI7_USING_DMA)
        .enable_dma = RT_TRUE,
#endif
        .tx_dmamux = HPM_DMA_SRC_SPI7_TX,
        .rx_dmamux = HPM_DMA_SRC_SPI7_RX,
        .spi_irq = IRQn_SPI7,
#if defined(BSP_SPI7_IRQ_PRIORITY)
        .spi_irq_priority = BSP_SPI7_IRQ_PRIORITY,
#else
        .spi_irq_priority = 1,
#endif
#if !defined BSP_SPI7_USING_HARD_CS
        .spi_pins_init = init_spi_pins_with_gpio_as_cs,
#else
        .spi_pins_init = init_spi_pins,
#endif
    },
#endif
};
/* RT-Thread SPI bus operations implemented by this driver. */
static struct rt_spi_ops hpm_spi_ops =
{
    .configure = hpm_spi_configure,
    .xfer = hpm_spi_xfer,
};
  414. static inline void handle_spi_isr(SPI_Type *ptr)
  415. {
  416. volatile uint32_t irq_status;
  417. RT_ASSERT(ptr != RT_NULL);
  418. rt_base_t level;
  419. level = rt_hw_interrupt_disable();
  420. irq_status = spi_get_interrupt_status(ptr);
  421. if (irq_status & spi_end_int)
  422. {
  423. spi_clear_interrupt_status(ptr, spi_end_int);
  424. for (uint32_t i = 0; i < sizeof(hpm_spis) / sizeof(hpm_spis[0]); i++)
  425. {
  426. if (hpm_spis[i].spi_base == ptr)
  427. {
  428. rt_sem_release(hpm_spis[i].spi_xfer_done_sem);
  429. break;
  430. }
  431. }
  432. }
  433. rt_hw_interrupt_enable(level);
  434. }
/*
 * Per-instance interrupt entry points: each vector simply forwards to the
 * shared handler with its controller's register base.
 */
#if defined(BSP_USING_SPI0)
SDK_DECLARE_EXT_ISR_M(IRQn_SPI0, spi0_isr);
void spi0_isr(void)
{
    handle_spi_isr(HPM_SPI0);
}
#endif
#if defined(BSP_USING_SPI1)
SDK_DECLARE_EXT_ISR_M(IRQn_SPI1, spi1_isr);
void spi1_isr(void)
{
    handle_spi_isr(HPM_SPI1);
}
#endif
#if defined(BSP_USING_SPI2)
SDK_DECLARE_EXT_ISR_M(IRQn_SPI2, spi2_isr);
void spi2_isr(void)
{
    handle_spi_isr(HPM_SPI2);
}
#endif
#if defined(BSP_USING_SPI3)
SDK_DECLARE_EXT_ISR_M(IRQn_SPI3, spi3_isr);
void spi3_isr(void)
{
    handle_spi_isr(HPM_SPI3);
}
#endif
#if defined(BSP_USING_SPI4)
SDK_DECLARE_EXT_ISR_M(IRQn_SPI4, spi4_isr);
void spi4_isr(void)
{
    handle_spi_isr(HPM_SPI4);
}
#endif
#if defined(BSP_USING_SPI5)
SDK_DECLARE_EXT_ISR_M(IRQn_SPI5, spi5_isr);
void spi5_isr(void)
{
    handle_spi_isr(HPM_SPI5);
}
#endif
#if defined(BSP_USING_SPI6)
SDK_DECLARE_EXT_ISR_M(IRQn_SPI6, spi6_isr);
void spi6_isr(void)
{
    handle_spi_isr(HPM_SPI6);
}
#endif
#if defined(BSP_USING_SPI7)
SDK_DECLARE_EXT_ISR_M(IRQn_SPI7, spi7_isr);
void spi7_isr(void)
{
    handle_spi_isr(HPM_SPI7);
}
#endif
  491. void spi_dma_channel_tc_callback(DMA_Type *ptr, uint32_t channel, void *user_data)
  492. {
  493. struct hpm_spi *spi = (struct hpm_spi *)user_data;
  494. RT_ASSERT(spi != RT_NULL);
  495. RT_ASSERT(ptr != RT_NULL);
  496. rt_base_t level;
  497. level = rt_hw_interrupt_disable();
  498. if ((spi->tx_dma.base == ptr) && spi->tx_dma.channel == channel)
  499. {
  500. dma_mgr_disable_chn_irq(&spi->tx_dma, DMA_MGR_INTERRUPT_MASK_TC);
  501. rt_sem_release(spi->txdma_xfer_done_sem);
  502. }
  503. if ((spi->rx_dma.base == ptr) && spi->rx_dma.channel == channel)
  504. {
  505. dma_mgr_disable_chn_irq(&spi->rx_dma, DMA_MGR_INTERRUPT_MASK_TC);
  506. rt_sem_release(spi->rxdma_xfer_done_sem);
  507. }
  508. rt_hw_interrupt_enable(level);
  509. }
/*
 * rt_spi_ops.configure implementation: applies an RT-Thread SPI
 * configuration (master/slave, data width, CPOL/CPHA, bit order, 3-wire,
 * clock) to the HPMicro SPI controller and caches the default control
 * configuration for later transfers.
 *
 * Returns RT_EOK on success, or -RT_EINVAL when the requested master SCLK
 * frequency cannot be derived from the SPI source clock.
 */
static rt_err_t hpm_spi_configure(struct rt_spi_device *device, struct rt_spi_configuration *cfg)
{
    spi_timing_config_t timing_config = { 0 };
    spi_format_config_t format_config = { 0 };
    struct hpm_spi *spi = RT_NULL;
    hpm_stat_t stat = status_success;
    spi = (struct hpm_spi *)(device->bus->parent.user_data);
    RT_ASSERT(spi != RT_NULL);
    /* hpm spi data width support 1 ~ 32 */
    RT_ASSERT((cfg->data_width > 0) && (cfg->data_width <= 32));
    spi->spi_pins_init(spi->spi_base);
    struct rt_spi_configuration *spi_cfg = cfg;
    if (spi_cfg->mode & RT_SPI_SLAVE) {
        /* slave: SDK defaults plus data-only mode (no cmd/addr phases on the wire) */
        spi_slave_get_default_format_config(&format_config);
        spi_slave_get_default_control_config(&spi->control_config);
        spi->control_config.slave_config.slave_data_only = true;
    } else {
        spi_master_get_default_timing_config(&timing_config);
        spi_master_get_default_format_config(&format_config);
        spi_master_get_default_control_config(&spi->control_config);
        timing_config.master_config.cs2sclk = spi_cs2sclk_half_sclk_1;
        timing_config.master_config.csht = spi_csht_half_sclk_1;
        timing_config.master_config.clk_src_freq_in_hz = board_init_spi_clock(spi->spi_base);
        /* clamp the requested SCLK to the SPI source clock frequency */
        if (spi_cfg->max_hz > timing_config.master_config.clk_src_freq_in_hz) {
            spi_cfg->max_hz = timing_config.master_config.clk_src_freq_in_hz;
        }
        timing_config.master_config.sclk_freq_in_hz = spi_cfg->max_hz;
        stat = spi_master_timing_init(spi->spi_base, &timing_config);
        LOG_D("spi clock frequency = %d, spi sclk frequency = %d", timing_config.master_config.clk_src_freq_in_hz, timing_config.master_config.sclk_freq_in_hz);
        if (stat != status_success) {
            LOG_E("spi clock frequency = %d, spi sclk frequency = %d \n", timing_config.master_config.clk_src_freq_in_hz, timing_config.master_config.sclk_freq_in_hz);
            LOG_E("set spi master sclk frequency fail, SPI_freq / spi_sclk must be an integer multiple and the ratio must be an even number.");
            return -RT_EINVAL;
        }
    }
    /* translate RT-Thread mode bits into the SDK format configuration */
    format_config.common_config.data_len_in_bits = cfg->data_width;
    format_config.common_config.cpha = (cfg->mode & RT_SPI_CPHA) ? spi_sclk_sampling_even_clk_edges : spi_sclk_sampling_odd_clk_edges;
    format_config.common_config.cpol = (cfg->mode & RT_SPI_CPOL) ? spi_sclk_high_idle : spi_sclk_low_idle;
    format_config.common_config.lsb = (cfg->mode & RT_SPI_MSB) ? false : true;
    format_config.common_config.mosi_bidir = cfg->mode & RT_SPI_3WIRE ? true : false;
    spi_format_init(spi->spi_base, &format_config);
    /* default transfer shape: full-duplex data only; per-message phases are
     * enabled later by hpm_qspi_parse_phase_message() */
    spi->control_config.master_config.addr_enable = false;
    spi->control_config.master_config.cmd_enable = false;
    spi->control_config.master_config.token_enable = false;
    spi->control_config.common_config.trans_mode = spi_trans_write_read_together;
    return RT_EOK;
}
  557. static rt_err_t hpm_spi_check_params(struct rt_spi_device *device, struct rt_spi_message *msg)
  558. {
  559. struct rt_spi_message *spi_msg = (struct rt_spi_message *)msg;
  560. #ifdef RT_USING_QSPI
  561. struct rt_qspi_message *qspi_msg = (struct rt_qspi_message *)msg;
  562. struct rt_qspi_device *qspi_dev = (struct rt_qspi_device *)device;
  563. struct rt_qspi_configuration *qspi_cfg = (struct rt_qspi_configuration *)&qspi_dev->config;
  564. #endif
  565. if ((device->config.mode == RT_SPI_SLAVE) && (msg->length > SPI_SOC_TRANSFER_COUNT_MAX)) {
  566. LOG_E("spi SPI transfer cannot exceed %d bytes for slave\n", SPI_SOC_TRANSFER_COUNT_MAX);
  567. return -RT_EINVAL;
  568. }
  569. if ((device->config.mode == RT_SPI_SLAVE) && ((msg->recv_buf == RT_NULL) || (msg->send_buf == RT_NULL)))
  570. {
  571. LOG_E("spi only support read write toggther mode for slave\n");
  572. return -RT_EINVAL;
  573. }
  574. #ifdef RT_USING_QSPI
  575. if (device->bus->mode == RT_SPI_BUS_MODE_QSPI) {
  576. if ((device->config.mode & RT_SPI_MASTER) == RT_SPI_MASTER) {
  577. if (qspi_msg->instruction.qspi_lines > 1) {
  578. LOG_E("dspi/qspi only support single instruction(command) phase for master\n");
  579. return -RT_EINVAL;
  580. }
  581. }
  582. if (((device->config.mode & RT_SPI_MASTER) == RT_SPI_MASTER) && ((msg->recv_buf != RT_NULL) && (msg->send_buf != RT_NULL)) &&
  583. (qspi_msg->dummy_cycles > 0)) {
  584. LOG_E("dspi/qspi only not support dummy phase on read write toggther mode for master\n");
  585. return -RT_EINVAL;
  586. }
  587. if (qspi_msg->address.size != 0) {
  588. if (((qspi_msg->address.size != 8) && (qspi_msg->address.size != 16) && (qspi_msg->address.size != 24) && (qspi_msg->address.size != 32))) {
  589. LOG_E("dspi/qspi only support address phase size 8/16/24/32 for master\n");
  590. return -RT_EINVAL;
  591. }
  592. }
  593. if (qspi_msg->address.qspi_lines != 0) {
  594. if ((qspi_msg->address.qspi_lines != 1) && (qspi_msg->address.qspi_lines != 2) && (qspi_msg->address.qspi_lines != 4)) {
  595. LOG_E("dspi/qspi only support address phase qspi lines 1/2/4 for master\n");
  596. return -RT_EINVAL;
  597. }
  598. }
  599. if (qspi_msg->alternate_bytes.size != 0) {
  600. LOG_E("dspi/qspi not support alternate phase size 0 for master\n");
  601. return -RT_EINVAL;
  602. }
  603. if (qspi_cfg->qspi_dl_width != 0) {
  604. switch (qspi_cfg->qspi_dl_width)
  605. {
  606. case 1:
  607. if (qspi_msg->dummy_cycles != 0) {
  608. if ((qspi_msg->dummy_cycles > (4 * 8))) {
  609. LOG_E("spi only support dummy phase cycles < 32 for master\n");
  610. return -RT_EINVAL;
  611. }
  612. if ((qspi_msg->dummy_cycles != 0) && (qspi_msg->dummy_cycles % 8)) {
  613. LOG_E("The number of cycles should be an integer multiple of 8 for spi master\n");
  614. return -RT_EINVAL;
  615. }
  616. }
  617. break;
  618. case 2:
  619. if (qspi_msg->dummy_cycles != 0) {
  620. if (qspi_msg->dummy_cycles > (4 * 4)) {
  621. LOG_E("dspi only support dummy phase cycles < 16 for master\n");
  622. return -RT_EINVAL;
  623. }
  624. if (qspi_msg->dummy_cycles % 4) {
  625. LOG_E("The number of cycles should be an integer multiple of 4 for spi master\n");
  626. return -RT_EINVAL;
  627. }
  628. }
  629. break;
  630. case 4:
  631. if (qspi_msg->dummy_cycles != 0) {
  632. if (qspi_msg->dummy_cycles > (4 * 2)) {
  633. LOG_E("qspi only support dummy phase cycles < 8 for master\n");
  634. return -RT_EINVAL;
  635. }
  636. if (qspi_msg->dummy_cycles % 2) {
  637. LOG_E("The number of cycles should be an integer multiple of 2 for spi master\n");
  638. return -RT_EINVAL;
  639. }
  640. }
  641. break;
  642. default:
  643. LOG_E("spi only support data phase qspi lines 1/2/4 for master\n");
  644. return -RT_EINVAL;
  645. }
  646. }
  647. }
  648. #endif
  649. return RT_EOK;
  650. }
  651. bool hpm_qspi_parse_phase_message(struct rt_spi_device *device, struct rt_qspi_message *msg,
  652. spi_control_config_t *control_config, rt_uint8_t *cmd, rt_uint32_t *addr)
  653. {
  654. rt_uint8_t dummy_bytes = 0;
  655. bool need_dummy = RT_FALSE;
  656. struct hpm_spi *spi = (struct hpm_spi *)(device->bus->parent.user_data);
  657. #ifdef RT_USING_QSPI
  658. struct rt_qspi_message *qspi_msg = (struct rt_qspi_message *)msg;
  659. struct rt_qspi_device *qspi_dev = (struct rt_qspi_device *)device;
  660. struct rt_qspi_configuration *qspi_cfg = (struct rt_qspi_configuration *)&qspi_dev->config;
  661. if ((device->bus->mode == RT_SPI_BUS_MODE_QSPI) && ((device->config.mode & RT_SPI_MASTER) == RT_SPI_MASTER)) {
  662. if (msg->instruction.qspi_lines == 1) {
  663. (*cmd) = msg->instruction.content;
  664. control_config->master_config.cmd_enable = RT_TRUE;
  665. } else {
  666. control_config->master_config.cmd_enable = RT_FALSE;
  667. }
  668. if (msg->address.qspi_lines != 0) {
  669. control_config->master_config.addr_enable = RT_TRUE;
  670. switch (msg->address.qspi_lines) {
  671. case 1:
  672. control_config->master_config.addr_phase_fmt = spi_address_phase_format_single_io_mode;
  673. break;
  674. case 2:
  675. case 4:
  676. control_config->master_config.addr_phase_fmt = spi_address_phase_format_dualquad_io_mode;
  677. break;
  678. default:
  679. break;
  680. }
  681. } else {
  682. control_config->master_config.addr_enable = RT_FALSE;
  683. }
  684. if (msg->address.size != 0) {
  685. (*addr) = msg->address.content;
  686. control_config->master_config.addr_enable = RT_TRUE;
  687. switch (msg->address.size) {
  688. case 8:
  689. spi_set_address_len(spi->spi_base, addrlen_8bit);
  690. break;
  691. case 16:
  692. spi_set_address_len(spi->spi_base, addrlen_16bit);
  693. break;
  694. case 24:
  695. spi_set_address_len(spi->spi_base, addrlen_24bit);
  696. break;
  697. case 32:
  698. spi_set_address_len(spi->spi_base, addrlen_32bit);
  699. break;
  700. default:
  701. break;
  702. }
  703. } else {
  704. control_config->master_config.addr_enable = RT_FALSE;
  705. }
  706. if (msg->dummy_cycles == 0) {
  707. need_dummy = RT_FALSE;
  708. } else {
  709. need_dummy = RT_TRUE;
  710. switch (msg->qspi_data_lines)
  711. {
  712. case 1:
  713. dummy_bytes = (msg->dummy_cycles + 7) / 8;
  714. break;
  715. case 2:
  716. dummy_bytes = (msg->dummy_cycles + 3) / 4;
  717. break;
  718. case 4:
  719. dummy_bytes = (msg->dummy_cycles + 1) / 2;
  720. break;
  721. default:
  722. break;
  723. }
  724. }
  725. if (dummy_bytes != 0) {
  726. switch (dummy_bytes)
  727. {
  728. case 1:
  729. control_config->common_config.dummy_cnt = spi_dummy_count_1;
  730. break;
  731. case 2:
  732. control_config->common_config.dummy_cnt = spi_dummy_count_2;
  733. break;
  734. case 3:
  735. control_config->common_config.dummy_cnt = spi_dummy_count_3;
  736. break;
  737. case 4:
  738. control_config->common_config.dummy_cnt = spi_dummy_count_4;
  739. break;
  740. default:
  741. break;
  742. }
  743. }
  744. if (qspi_cfg->qspi_dl_width == 1) {
  745. if (msg->qspi_data_lines == 1) {
  746. spi->control_config.common_config.data_phase_fmt = spi_single_io_mode;
  747. } else {
  748. LOG_E("msg data_lines must be 1 when qspi_dl_width is 1, but msg data_lines is %d\n", msg->qspi_data_lines);
  749. }
  750. } else if (qspi_cfg->qspi_dl_width == 2) {
  751. if (msg->qspi_data_lines == 1) {
  752. spi->control_config.common_config.data_phase_fmt = spi_dual_io_mode;
  753. } else if (msg->qspi_data_lines == 2) {
  754. spi->control_config.common_config.data_phase_fmt = spi_quad_io_mode;
  755. } else {
  756. LOG_E("msg data_lines must be 1 or 2 when qspi_dl_width is 2, but msg data_lines is %d\n", msg->qspi_data_lines);
  757. }
  758. } else if (qspi_cfg->qspi_dl_width == 4) {
  759. if (msg->qspi_data_lines == 1) {
  760. spi->control_config.common_config.data_phase_fmt = spi_single_io_mode;
  761. } else if (msg->qspi_data_lines == 2) {
  762. spi->control_config.common_config.data_phase_fmt = spi_dual_io_mode;
  763. } else if (msg->qspi_data_lines == 4) {
  764. spi->control_config.common_config.data_phase_fmt = spi_quad_io_mode;
  765. } else {
  766. spi->control_config.common_config.data_phase_fmt = spi_single_io_mode;
  767. }
  768. } else {
  769. spi->control_config.common_config.data_phase_fmt = spi_single_io_mode;
  770. }
  771. }
  772. #endif
  773. return need_dummy;
  774. }
  775. static rt_ssize_t hpm_spi_send_no_data(SPI_Type *ptr, spi_control_config_t *config, uint8_t *cmd, uint32_t *addr, struct rt_qspi_message *qspi_msg)
  776. {
  777. rt_ssize_t actual_len = 0;
  778. hpm_stat_t spi_stat = status_success;
  779. if (config->master_config.cmd_enable == true || config->master_config.addr_enable == true) {
  780. config->common_config.trans_mode = spi_trans_no_data;
  781. spi_stat = spi_transfer(ptr, config, cmd, addr, NULL, 0, NULL, 0);
  782. if (spi_stat != status_success) {
  783. actual_len = -RT_EIO;
  784. } else {
  785. if (qspi_msg->instruction.qspi_lines > 0) {
  786. actual_len++;
  787. }
  788. if (qspi_msg->address.size > 0) {
  789. switch (qspi_msg->address.size)
  790. {
  791. case 8:
  792. actual_len++;
  793. break;
  794. case 16:
  795. actual_len += 2;
  796. break;
  797. case 24:
  798. actual_len += 3;
  799. break;
  800. case 32:
  801. actual_len += 4;
  802. break;
  803. default:
  804. break;
  805. }
  806. }
  807. }
  808. }
  809. return actual_len;
  810. }
  811. static rt_ssize_t hpm_spi_xfer_polling(struct rt_spi_device *device, struct rt_spi_message *msg)
  812. {
  813. struct hpm_spi *spi = (struct hpm_spi *) (device->bus->parent.user_data);
  814. rt_ssize_t actual_len = 0;
  815. hpm_stat_t spi_stat = status_success;
  816. struct rt_spi_message *_msg = (struct rt_spi_message *)msg;
  817. struct rt_spi_message *spi_msg = (struct rt_spi_message *)_msg;
  818. #ifdef RT_USING_QSPI
  819. struct rt_qspi_message *qspi_msg = RT_NULL;
  820. #endif
  821. rt_uint32_t remaining_size = _msg->length;
  822. rt_uint32_t transfer_len;
  823. rt_uint8_t *tx_buf = (rt_uint8_t*) _msg->send_buf;
  824. rt_uint8_t *rx_buf = (rt_uint8_t*) _msg->recv_buf;
  825. rt_uint8_t cmd = 0;
  826. rt_uint32_t addr = 0;
  827. rt_uint32_t index = 0;
  828. bool need_dummy = RT_FALSE;
  829. RT_ASSERT(spi != RT_NULL);
  830. spi->control_config.common_config.tx_dma_enable = false;
  831. spi->control_config.common_config.rx_dma_enable = false;
  832. if (device->config.mode == RT_SPI_SLAVE) {
  833. #if 0
  834. spi_slave_enable_data_only(spi->spi_base);
  835. spi_set_transfer_mode(spi->spi_base, spi_trans_write_read_together);
  836. #endif
  837. LOG_E("Spi slave does not support polling transmission\n");
  838. return -RT_EINVAL;
  839. }
  840. while (_msg != RT_NULL) {
  841. tx_buf = (rt_uint8_t*) _msg->send_buf;
  842. rx_buf = (rt_uint8_t*) _msg->recv_buf;
  843. index = 0;
  844. if (hpm_spi_check_params(device, _msg) != RT_EOK) {
  845. return -RT_EINVAL;
  846. }
  847. #ifdef RT_USING_QSPI
  848. qspi_msg = (struct rt_qspi_message *)_msg;
  849. need_dummy = hpm_qspi_parse_phase_message(device, qspi_msg, &spi->control_config, &cmd, &addr);
  850. remaining_size = _msg->length;
  851. if (remaining_size == 0) {
  852. actual_len = hpm_spi_send_no_data(spi->spi_base, &spi->control_config, &cmd, &addr, qspi_msg);
  853. }
  854. #endif
  855. while (remaining_size > 0) {
  856. transfer_len = MIN(SPI_SOC_TRANSFER_COUNT_MAX, remaining_size);
  857. /* Next sub-packet: Disable CMD and ADDR phase for the following packet */
  858. if (index > 0) {
  859. spi->control_config.master_config.cmd_enable = RT_FALSE;
  860. spi->control_config.master_config.addr_enable = RT_FALSE;
  861. }
  862. if ((_msg->send_buf != NULL) && (_msg->recv_buf != NULL)) {
  863. spi->control_config.common_config.trans_mode = spi_trans_write_read_together;
  864. spi_stat = spi_transfer(spi->spi_base, &spi->control_config, &cmd, &addr, tx_buf, transfer_len, rx_buf, transfer_len);
  865. }
  866. else if (_msg->send_buf != NULL) {
  867. if ((need_dummy == RT_TRUE) && (index == 0)) {
  868. spi->control_config.common_config.trans_mode = spi_trans_dummy_write;
  869. } else {
  870. spi->control_config.common_config.trans_mode = spi_trans_write_only;
  871. }
  872. spi_stat = spi_transfer(spi->spi_base, &spi->control_config, &cmd, &addr, (uint8_t*) tx_buf, transfer_len, NULL, 0);
  873. }
  874. else if (_msg->recv_buf != NULL){
  875. if ((need_dummy == RT_TRUE) && (index == 0)) {
  876. spi->control_config.common_config.trans_mode = spi_trans_dummy_read;
  877. } else {
  878. spi->control_config.common_config.trans_mode = spi_trans_read_only;
  879. }
  880. spi_stat = spi_transfer(spi->spi_base, &spi->control_config, &cmd, &addr, NULL, 0, rx_buf, transfer_len);
  881. }
  882. if (spi_stat != status_success) {
  883. actual_len = -RT_EIO;
  884. break;
  885. }
  886. if (tx_buf != NULL) {
  887. tx_buf += transfer_len;
  888. }
  889. if (rx_buf != NULL) {
  890. rx_buf += transfer_len;
  891. }
  892. remaining_size -= transfer_len;
  893. actual_len += transfer_len;
  894. index++;
  895. }
  896. _msg = _msg->next;
  897. }
  898. return actual_len;
  899. }
  900. static hpm_stat_t hpm_spi_tx_dma_config(struct rt_spi_device *device, uint8_t *buff, uint32_t size)
  901. {
  902. hpm_stat_t stat = status_success;
  903. uint8_t transfer_width;
  904. uint32_t buf_addr;
  905. struct hpm_spi *spi = (struct hpm_spi *)(device->bus->parent.user_data);
  906. uint8_t data_len_in_bytes = spi_get_data_length_in_bytes(spi->spi_base);
  907. dma_resource_t *resource = &spi->tx_dma;
  908. uint32_t core_id = read_csr(CSR_MHARTID);
  909. if (data_len_in_bytes > 2) {
  910. data_len_in_bytes = 4; /* must be 4 aglin */
  911. /* word */
  912. transfer_width = DMA_MGR_TRANSFER_WIDTH_WORD;
  913. } else {
  914. /* byte or half_word*/
  915. transfer_width = data_len_in_bytes - 1;
  916. }
  917. if (size > (SPI_SOC_TRANSFER_COUNT_MAX * data_len_in_bytes)) {
  918. return status_invalid_argument;
  919. }
  920. buf_addr = core_local_mem_to_sys_address(core_id, (uint32_t)buff);
  921. HPM_CHECK_RET(dma_mgr_set_chn_src_addr(resource, buf_addr));
  922. HPM_CHECK_RET(dma_mgr_set_chn_dst_width(resource, transfer_width));
  923. HPM_CHECK_RET(dma_mgr_set_chn_src_width(resource, transfer_width));
  924. HPM_CHECK_RET(dma_mgr_set_chn_transize(resource, size / data_len_in_bytes));
  925. return stat;
  926. }
  927. static hpm_stat_t hpm_spi_tx_dma_start(struct rt_spi_device *device)
  928. {
  929. hpm_stat_t stat = status_success;
  930. struct hpm_spi *spi = (struct hpm_spi *)(device->bus->parent.user_data);
  931. dma_resource_t *resource = &spi->tx_dma;
  932. HPM_CHECK_RET(dma_mgr_enable_channel(resource));
  933. return stat;
  934. }
  935. static hpm_stat_t hpm_spi_rx_dma_config(struct rt_spi_device *device, uint8_t *buff, uint32_t size)
  936. {
  937. hpm_stat_t stat = status_success;
  938. uint8_t transfer_width;
  939. uint32_t buf_addr;
  940. uint32_t core_id = read_csr(CSR_MHARTID);
  941. struct hpm_spi *spi = (struct hpm_spi *)(device->bus->parent.user_data);
  942. uint8_t data_len_in_bytes = spi_get_data_length_in_bytes(spi->spi_base);
  943. dma_resource_t *resource = &spi->rx_dma;
  944. if (data_len_in_bytes > 2) {
  945. data_len_in_bytes = 4; /* must be 4 aglin */
  946. /* word */
  947. transfer_width = DMA_MGR_TRANSFER_WIDTH_WORD;
  948. } else {
  949. /* byte or half_word*/
  950. transfer_width = data_len_in_bytes - 1;
  951. }
  952. if (size > (SPI_SOC_TRANSFER_COUNT_MAX * data_len_in_bytes)) {
  953. return status_invalid_argument;
  954. }
  955. buf_addr = core_local_mem_to_sys_address(core_id, (uint32_t)buff);
  956. HPM_CHECK_RET(dma_mgr_set_chn_dst_addr(resource, buf_addr));
  957. HPM_CHECK_RET(dma_mgr_set_chn_src_width(resource, transfer_width));
  958. HPM_CHECK_RET(dma_mgr_set_chn_dst_width(resource, transfer_width));
  959. HPM_CHECK_RET(dma_mgr_set_chn_transize(resource, size / data_len_in_bytes));
  960. HPM_CHECK_RET(dma_mgr_enable_channel(resource));
  961. return stat;
  962. }
  963. static hpm_stat_t hpm_spi_rx_dma_start(struct rt_spi_device *device)
  964. {
  965. hpm_stat_t stat = status_success;
  966. struct hpm_spi *spi = (struct hpm_spi *)(device->bus->parent.user_data);
  967. dma_resource_t *resource = &spi->rx_dma;
  968. HPM_CHECK_RET(dma_mgr_enable_channel(resource));
  969. return stat;
  970. }
  971. static void hpm_spi_transfer_data_cache_handle(struct rt_spi_message *msg, spi_dma_buf_ctx_t *ctx, rt_uint32_t len)
  972. {
  973. rt_uint32_t transfer_len;
  974. rt_uint8_t *tx_buf = RT_NULL;
  975. rt_uint8_t *rx_buf = RT_NULL;
  976. uint32_t aligned_start = 0;
  977. uint32_t aligned_end = 0;
  978. uint32_t aligned_size = 0;
  979. if (msg->send_buf != RT_NULL) {
  980. if (l1c_dc_is_enabled() == true) {
  981. if (((rt_uint32_t)msg->send_buf % HPM_L1C_CACHELINE_SIZE) || (len % HPM_L1C_CACHELINE_SIZE)) {
  982. ctx->aligned_size = (len + HPM_L1C_CACHELINE_SIZE - 1U) & ~(HPM_L1C_CACHELINE_SIZE - 1U);
  983. ctx->raw_alloc_tx_buf = (rt_uint8_t*)rt_malloc_align(ctx->aligned_size, HPM_L1C_CACHELINE_SIZE);
  984. RT_ASSERT(ctx->raw_alloc_tx_buf != RT_NULL);
  985. ctx->aligned_tx_buf = ctx->raw_alloc_tx_buf;
  986. rt_memcpy(ctx->aligned_tx_buf, msg->send_buf, len);
  987. l1c_dc_flush((uint32_t) (ctx->aligned_tx_buf), ctx->aligned_size);
  988. } else {
  989. ctx->aligned_tx_buf = (uint8_t*) msg->send_buf;
  990. aligned_start = HPM_L1C_CACHELINE_ALIGN_DOWN((uint32_t)(ctx->aligned_tx_buf));
  991. aligned_end = HPM_L1C_CACHELINE_ALIGN_UP((uint32_t)(ctx->aligned_tx_buf) + msg->length);
  992. aligned_size = aligned_end - aligned_start;
  993. ctx->aligned_size = aligned_size;
  994. l1c_dc_writeback(aligned_start, aligned_size);
  995. }
  996. } else {
  997. ctx->aligned_tx_buf = (uint8_t*) msg->send_buf;
  998. ctx->aligned_size = len;
  999. }
  1000. }
  1001. if (msg->recv_buf != RT_NULL) {
  1002. if (l1c_dc_is_enabled() == true) {
  1003. if (((rt_uint32_t)msg->recv_buf % HPM_L1C_CACHELINE_SIZE) || (len % HPM_L1C_CACHELINE_SIZE)) {
  1004. ctx->aligned_size = (len + HPM_L1C_CACHELINE_SIZE - 1U) & ~(HPM_L1C_CACHELINE_SIZE - 1U);
  1005. ctx->raw_alloc_rx_buf = (uint8_t*)rt_malloc_align(ctx->aligned_size, HPM_L1C_CACHELINE_SIZE);
  1006. RT_ASSERT(ctx->raw_alloc_rx_buf != RT_NULL);
  1007. ctx->aligned_rx_buf = ctx->raw_alloc_rx_buf;
  1008. l1c_dc_invalidate((uint32_t)(ctx->aligned_rx_buf), ctx->aligned_size);
  1009. } else {
  1010. ctx->aligned_rx_buf = (uint8_t*)msg->recv_buf;
  1011. ctx->aligned_size = len;
  1012. }
  1013. } else {
  1014. ctx->aligned_rx_buf = (uint8_t*) msg->recv_buf;
  1015. ctx->aligned_size = len;
  1016. }
  1017. }
  1018. }
/**
 * Fast path: push a short TX-only master frame straight into the SPI FIFO.
 *
 * Only taken when the whole payload fits in the hardware FIFO
 * (*remaining_size <= SPI_SOC_FIFO_DEPTH), the device is a master, and
 * the message is transmit-only. Data is written to the FIFO first, then
 * ADDR and CMD registers are written (writing CMD starts the transfer),
 * and the function blocks on spi_xfer_done_sem until the spi_end
 * interrupt signals completion.
 *
 * Returns status_success on completion; status_fail when the fast path
 * does not apply (caller falls back to DMA); a spi_control_init error
 * otherwise.
 *
 * NOTE(review): the FIFO loop iterates *remaining_size times but each
 * iteration consumes data_len_in_bytes bytes — this looks like it
 * assumes a 1-byte SPI data unit; verify for 16/32-bit unit configs.
 * NOTE(review): msg->send_buf is advanced in place, so the caller's
 * message buffer pointer is mutated by this fast path — confirm callers
 * do not reuse the message afterwards.
 */
static hpm_stat_t hpm_spi_transmit_use_fifo(struct rt_spi_device *device, struct rt_spi_message *msg, rt_uint32_t *remaining_size,
rt_uint8_t *cmd, rt_uint32_t *addr, bool *need_dummy, rt_ssize_t *actual_len)
{
    hpm_stat_t stat = status_fail;
    struct hpm_spi *spi = (struct hpm_spi *)(device->bus->parent.user_data);
    if (((*remaining_size) <= SPI_SOC_FIFO_DEPTH) && ((device->config.mode & RT_SPI_MASTER) == RT_SPI_MASTER)
        && (msg->send_buf != NULL) && (msg->recv_buf == NULL)) {
        /* Completion is reported via the spi_end interrupt -> spi_xfer_done_sem. */
        spi_enable_interrupt(spi->spi_base, spi_end_int);
        if ((*need_dummy) == RT_TRUE) {
            spi->control_config.common_config.trans_mode = spi_trans_dummy_write;
        } else {
            spi->control_config.common_config.trans_mode = spi_trans_write_only;
        }
        stat = spi_control_init(spi->spi_base, &spi->control_config, (*remaining_size), 0);
        if (stat != status_success) {
            return stat;
        }
        uint8_t data_len_in_bytes = spi_get_data_length_in_bytes(spi->spi_base);
        if (msg->send_buf != NULL) {
            /* Preload the whole payload into the FIFO before starting. */
            for (uint8_t j = 0; j < (*remaining_size); j++) {
                switch (data_len_in_bytes) {
                case 1:
                    spi->spi_base->DATA = *(uint8_t *)msg->send_buf;
                    break;
                case 2:
                    spi->spi_base->DATA = *(uint16_t *)msg->send_buf;
                    break;
                default:
                    spi->spi_base->DATA = *(uint32_t *)msg->send_buf;
                    break;
                }
                msg->send_buf += data_len_in_bytes;
                (*actual_len) += data_len_in_bytes;
            }
        }
        /* Writing CMD marks the beginning of the SPI transfer. */
        spi->spi_base->ADDR = SPI_ADDR_ADDR_SET(*addr);
        spi->spi_base->CMD = SPI_CMD_CMD_SET(*cmd);
        rt_sem_take(spi->spi_xfer_done_sem, RT_WAITING_FOREVER);
    }
    return stat;
}
  1060. static rt_ssize_t hpm_spi_xfer_dma(struct rt_spi_device *device, struct rt_spi_message *msg)
  1061. {
  1062. spi_dma_buf_ctx_t dma_buf_ctx = {0};
  1063. rt_uint8_t cmd = 0;
  1064. rt_uint32_t addr = 0, aligned_len = 0;
  1065. rt_uint32_t remaining_size = msg->length;
  1066. rt_uint32_t transfer_len;
  1067. rt_uint8_t *tx_buf = RT_NULL;
  1068. rt_uint8_t *rx_buf = RT_NULL;
  1069. uint32_t aligned_start = 0;
  1070. uint32_t aligned_end = 0;
  1071. uint32_t aligned_size = 0;
  1072. rt_ssize_t actual_len = 0;
  1073. bool need_dummy = RT_FALSE;
  1074. hpm_stat_t stat = status_success;
  1075. rt_uint32_t index;
  1076. struct rt_spi_message *_msg = (struct rt_spi_message *)msg;
  1077. struct rt_spi_message *spi_msg = (struct rt_spi_message *)_msg;
  1078. #ifdef RT_USING_QSPI
  1079. struct rt_qspi_message *qspi_msg = RT_NULL;
  1080. #endif
  1081. struct hpm_spi *spi = (struct hpm_spi *)(device->bus->parent.user_data);
  1082. spi_enable_interrupt(spi->spi_base, spi_end_int);
  1083. while (_msg != RT_NULL) {
  1084. index = 0;
  1085. if (hpm_spi_check_params(device, _msg) != RT_EOK) {
  1086. return -RT_EINVAL;
  1087. }
  1088. remaining_size = _msg->length;
  1089. #ifdef RT_USING_QSPI
  1090. qspi_msg = (struct rt_qspi_message *)_msg;
  1091. need_dummy = hpm_qspi_parse_phase_message(device, qspi_msg, &spi->control_config, &cmd, &addr);
  1092. if (remaining_size == 0) {
  1093. actual_len = hpm_spi_send_no_data(spi->spi_base, &spi->control_config, &cmd, &addr, qspi_msg);
  1094. } else
  1095. #endif
  1096. {
  1097. if (remaining_size > 0) {
  1098. /* If the length is less than SPI_SOC_FIFO_DEPTH, use fifo mode to transmit data */
  1099. stat = hpm_spi_transmit_use_fifo(device, _msg, &remaining_size, &cmd, &addr, &need_dummy, &actual_len);
  1100. if (stat == status_success) {
  1101. _msg = _msg->next;
  1102. continue;
  1103. }
  1104. hpm_spi_transfer_data_cache_handle(_msg, &dma_buf_ctx, msg->length);
  1105. tx_buf = dma_buf_ctx.aligned_tx_buf;
  1106. rx_buf = dma_buf_ctx.aligned_rx_buf;
  1107. dma_mgr_disable_chn_irq(&spi->rx_dma, DMA_MGR_INTERRUPT_MASK_TC);
  1108. dma_mgr_disable_chn_irq(&spi->tx_dma, DMA_MGR_INTERRUPT_MASK_TC);
  1109. }
  1110. }
  1111. while (remaining_size > 0) {
  1112. transfer_len = MIN(SPI_SOC_TRANSFER_COUNT_MAX, remaining_size);
  1113. /* Next sub-packet: Disable CMD and ADDR phase for the following packet */
  1114. if (index > 0) {
  1115. spi->control_config.master_config.cmd_enable = RT_FALSE;
  1116. spi->control_config.master_config.addr_enable = RT_FALSE;
  1117. }
  1118. if (_msg->send_buf != NULL && _msg->recv_buf != NULL) {
  1119. dma_mgr_enable_chn_irq(&spi->rx_dma, DMA_MGR_INTERRUPT_MASK_TC);
  1120. dma_mgr_enable_chn_irq(&spi->tx_dma, DMA_MGR_INTERRUPT_MASK_TC);
  1121. spi->control_config.common_config.tx_dma_enable = RT_TRUE;
  1122. spi->control_config.common_config.rx_dma_enable = RT_TRUE;
  1123. spi->control_config.common_config.trans_mode = spi_trans_write_read_together;
  1124. /* for spi_trans_write_read_together mode, the operation sequence is recommended as follows: */
  1125. /* first: config rx dma transfer and start rx dma */
  1126. stat = hpm_spi_rx_dma_config(device, rx_buf, transfer_len);
  1127. if (stat != status_success) {
  1128. break;
  1129. }
  1130. stat = hpm_spi_rx_dma_start(device);
  1131. if (stat != status_success) {
  1132. break;
  1133. }
  1134. /* second: config tx dma transfer */
  1135. stat = hpm_spi_tx_dma_config(device, tx_buf, transfer_len);
  1136. if (stat != status_success) {
  1137. break;
  1138. }
  1139. /* third: config spi */
  1140. stat = spi_control_init(spi->spi_base, &spi->control_config, transfer_len, transfer_len);
  1141. if (stat != status_success) {
  1142. break;
  1143. }
  1144. /* fourth: set spi address and enable spi rx /tx dma */
  1145. spi_write_address(spi->spi_base, spi_master_mode, &spi->control_config, &addr);
  1146. spi_enable_rx_dma(spi->spi_base);
  1147. #if defined(HPM_IP_FEATURE_SPI_DMA_TX_REQ_AFTER_CMD_FO_MASTER) && (HPM_IP_FEATURE_SPI_DMA_TX_REQ_AFTER_CMD_FO_MASTER == 1)
  1148. spi_master_enable_tx_dma_request_after_cmd_write(spi->spi_base);
  1149. #endif
  1150. spi_enable_tx_dma(spi->spi_base);
  1151. /* fifth: start tx dma */
  1152. stat = hpm_spi_tx_dma_start(device);
  1153. if (stat != status_success) {
  1154. break;
  1155. }
  1156. /* sixth: Write the command, which marks the beginning of an SPI transfer */
  1157. spi_write_command(spi->spi_base, spi_master_mode, &spi->control_config, &cmd);
  1158. /* to ensure complete transmission, check both SPI transfer completion and DMA transfer completion */
  1159. rt_sem_take(spi->spi_xfer_done_sem, RT_WAITING_FOREVER);
  1160. rt_sem_take(spi->txdma_xfer_done_sem, RT_WAITING_FOREVER);
  1161. rt_sem_take(spi->rxdma_xfer_done_sem, RT_WAITING_FOREVER);
  1162. } else if (_msg->send_buf != NULL) {
  1163. dma_mgr_enable_chn_irq(&spi->tx_dma, DMA_MGR_INTERRUPT_MASK_TC);
  1164. spi->control_config.common_config.tx_dma_enable = RT_TRUE;
  1165. spi->control_config.common_config.rx_dma_enable = RT_FALSE;
  1166. if ((need_dummy == RT_TRUE) && (index == 0)) {
  1167. spi->control_config.common_config.trans_mode = spi_trans_dummy_write;
  1168. } else {
  1169. spi->control_config.common_config.trans_mode = spi_trans_write_only;
  1170. }
  1171. stat = spi_setup_dma_transfer(spi->spi_base, &spi->control_config, &cmd, &addr, transfer_len, RT_NULL);
  1172. if (stat != status_success) {
  1173. break;
  1174. }
  1175. stat = hpm_spi_tx_dma_config(device, tx_buf, transfer_len);
  1176. if (stat != status_success) {
  1177. break;
  1178. }
  1179. stat = hpm_spi_tx_dma_start(device);
  1180. if (stat != status_success) {
  1181. break;
  1182. }
  1183. /* to ensure complete transmission, check both SPI transfer completion and DMA transfer completion */
  1184. rt_sem_take(spi->spi_xfer_done_sem, RT_WAITING_FOREVER);
  1185. rt_sem_take(spi->txdma_xfer_done_sem, RT_WAITING_FOREVER);
  1186. } else if (_msg->recv_buf != NULL) {
  1187. dma_mgr_enable_chn_irq(&spi->rx_dma, DMA_MGR_INTERRUPT_MASK_TC);
  1188. spi->control_config.common_config.tx_dma_enable = RT_FALSE;
  1189. spi->control_config.common_config.rx_dma_enable = RT_TRUE;
  1190. if ((need_dummy == RT_TRUE) && (index == 0)) {
  1191. spi->control_config.common_config.trans_mode = spi_trans_dummy_read;
  1192. } else {
  1193. spi->control_config.common_config.trans_mode = spi_trans_read_only;
  1194. }
  1195. stat = hpm_spi_rx_dma_config(device, rx_buf, transfer_len);
  1196. if (stat != status_success) {
  1197. break;
  1198. }
  1199. stat = hpm_spi_rx_dma_start(device);
  1200. if (stat != status_success) {
  1201. break;
  1202. }
  1203. stat = spi_setup_dma_transfer(spi->spi_base, &spi->control_config, &cmd, &addr, RT_NULL, transfer_len);
  1204. if (stat != status_success) {
  1205. break;
  1206. }
  1207. /* to ensure complete transmission, check both SPI transfer completion and DMA transfer completion */
  1208. rt_sem_take(spi->spi_xfer_done_sem, RT_WAITING_FOREVER);
  1209. rt_sem_take(spi->rxdma_xfer_done_sem, RT_WAITING_FOREVER);
  1210. }
  1211. if (tx_buf != NULL) {
  1212. tx_buf += transfer_len;
  1213. }
  1214. if (rx_buf != NULL) {
  1215. rx_buf += transfer_len;
  1216. }
  1217. remaining_size -= transfer_len;
  1218. actual_len += transfer_len;
  1219. index++;
  1220. }
  1221. if (l1c_dc_is_enabled() && (_msg->length > 0)) {
  1222. if (((rt_uint32_t)msg->send_buf % HPM_L1C_CACHELINE_SIZE) || (_msg->length % HPM_L1C_CACHELINE_SIZE)) {
  1223. if (dma_buf_ctx.aligned_tx_buf != RT_NULL) {
  1224. rt_free_align(dma_buf_ctx.raw_alloc_tx_buf);
  1225. dma_buf_ctx.raw_alloc_tx_buf = RT_NULL;
  1226. dma_buf_ctx.aligned_tx_buf = RT_NULL;
  1227. }
  1228. }
  1229. if ((l1c_dc_is_enabled() == true) && (_msg->recv_buf != RT_NULL) && (dma_buf_ctx.aligned_rx_buf != RT_NULL)) {
  1230. l1c_dc_invalidate((uint32_t) dma_buf_ctx.aligned_rx_buf, dma_buf_ctx.aligned_size);
  1231. if (((rt_uint32_t)msg->recv_buf % HPM_L1C_CACHELINE_SIZE) || (_msg->length % HPM_L1C_CACHELINE_SIZE)) {
  1232. rt_memcpy(_msg->recv_buf, dma_buf_ctx.aligned_rx_buf, _msg->length);
  1233. rt_free_align(dma_buf_ctx.raw_alloc_rx_buf);
  1234. dma_buf_ctx.raw_alloc_rx_buf = RT_NULL;
  1235. dma_buf_ctx.aligned_rx_buf = RT_NULL;
  1236. }
  1237. }
  1238. }
  1239. if (stat != status_success) {
  1240. actual_len = -RT_EIO;
  1241. break;
  1242. }
  1243. _msg = _msg->next;
  1244. }
  1245. spi_disable_interrupt(spi->spi_base, spi_end_int);
  1246. return actual_len;
  1247. }
  1248. static rt_ssize_t hpm_spi_xfer(struct rt_spi_device *device, struct rt_spi_message *msg)
  1249. {
  1250. RT_ASSERT(device != RT_NULL);
  1251. RT_ASSERT(msg != RT_NULL);
  1252. RT_ASSERT(device->bus != RT_NULL);
  1253. RT_ASSERT(device->bus->parent.user_data != RT_NULL);
  1254. rt_ssize_t len;
  1255. cs_ctrl_callback_t cs_pin_control = (cs_ctrl_callback_t) device->parent.user_data;
  1256. struct hpm_spi *spi = (struct hpm_spi *) (device->bus->parent.user_data);
  1257. hpm_stat_t spi_stat = status_success;
  1258. if (device->cs_pin == PIN_NONE) {
  1259. if ((cs_pin_control != NULL) && msg->cs_take) {
  1260. cs_pin_control(SPI_CS_TAKE);
  1261. }
  1262. } else {
  1263. if (msg->cs_take && !(device->config.mode & RT_SPI_NO_CS)) {
  1264. if (device->config.mode & RT_SPI_CS_HIGH) {
  1265. rt_pin_write(device->cs_pin, PIN_HIGH);
  1266. } else {
  1267. rt_pin_write(device->cs_pin, PIN_LOW);
  1268. }
  1269. }
  1270. }
  1271. if (spi->enable_dma) {
  1272. len = hpm_spi_xfer_dma(device, msg);
  1273. } else {
  1274. len = hpm_spi_xfer_polling(device, msg);
  1275. }
  1276. if (device->cs_pin == PIN_NONE) {
  1277. if ((cs_pin_control != NULL) && msg->cs_release) {
  1278. cs_pin_control(SPI_CS_RELEASE);
  1279. }
  1280. } else {
  1281. if (msg->cs_release && !(device->config.mode & RT_SPI_NO_CS)) {
  1282. if (device->config.mode & RT_SPI_CS_HIGH) {
  1283. rt_pin_write(device->cs_pin, PIN_LOW);
  1284. } else {
  1285. rt_pin_write(device->cs_pin, PIN_HIGH);
  1286. }
  1287. }
  1288. }
  1289. return len;
  1290. }
  1291. #ifdef RT_USING_QSPI
/**
 * QSPI mode-entry hook required by the RT-Thread QSPI device API.
 * The data-line width on this driver is selected per message (see the
 * QSPI phase parsing in the transfer path), so no controller action is
 * needed here.
 */
void enter_qspi_mode(struct rt_qspi_device *device)
{
    (void)device;
}
/**
 * QSPI mode-exit hook required by the RT-Thread QSPI device API.
 * Intentionally a no-op: there is no persistent QSPI mode to leave on
 * this controller.
 */
void exit_qspi_mode(struct rt_qspi_device *device)
{
    (void)device;
}
  1300. #endif
  1301. rt_err_t rt_hw_spi_device_attach(const char *bus_name, const char *device_name, cs_ctrl_callback_t callback)
  1302. {
  1303. RT_ASSERT(bus_name != RT_NULL);
  1304. RT_ASSERT(device_name != RT_NULL);
  1305. rt_err_t result;
  1306. #ifdef RT_USING_QSPI
  1307. struct rt_qspi_device *qspi_dev;
  1308. qspi_dev = (struct rt_qspi_device *) rt_malloc(sizeof(struct rt_qspi_device));
  1309. qspi_dev->enter_qspi_mode = enter_qspi_mode;
  1310. qspi_dev->exit_qspi_mode = exit_qspi_mode;
  1311. RT_ASSERT(qspi_dev != RT_NULL);
  1312. result = rt_spi_bus_attach_device(&qspi_dev->parent, device_name, bus_name, (void*)callback);
  1313. RT_ASSERT(result == RT_EOK);
  1314. struct hpm_spi *spi = RT_NULL;
  1315. spi = (struct hpm_spi *)(qspi_dev->parent.bus->parent.user_data);
  1316. switch (spi->spi_io_mode) {
  1317. case spi_single_io_mode:
  1318. qspi_dev->config.qspi_dl_width = 1;
  1319. break;
  1320. case spi_dual_io_mode:
  1321. qspi_dev->config.qspi_dl_width = 2;
  1322. break;
  1323. case spi_quad_io_mode:
  1324. qspi_dev->config.qspi_dl_width = 4;
  1325. break;
  1326. default:
  1327. qspi_dev->config.qspi_dl_width = 1;
  1328. break;
  1329. }
  1330. #else
  1331. struct rt_spi_device *spi_device;
  1332. /* attach the device to spi bus*/
  1333. spi_device = (struct rt_spi_device *) rt_malloc(sizeof(struct rt_spi_device));
  1334. RT_ASSERT(spi_device != RT_NULL);
  1335. result = rt_spi_bus_attach_device(spi_device, device_name, bus_name, (void*)callback);
  1336. RT_ASSERT(result == RT_EOK);
  1337. #endif
  1338. return result;
  1339. }
  1340. int rt_hw_spi_init(void)
  1341. {
  1342. rt_err_t ret = RT_EOK;
  1343. hpm_stat_t stat;
  1344. dma_mgr_chn_conf_t chg_config;
  1345. for (uint32_t i = 0; i < sizeof(hpm_spis) / sizeof(hpm_spis[0]); i++)
  1346. {
  1347. struct hpm_spi *spi = &hpm_spis[i];
  1348. spi->spi_bus.parent.user_data = spi;
  1349. clock_add_to_group(spi->clk_name, BOARD_RUNNING_CORE & 0x1);
  1350. if (spi->enable_dma)
  1351. {
  1352. dma_mgr_get_default_chn_config(&chg_config);
  1353. chg_config.src_width = DMA_MGR_TRANSFER_WIDTH_BYTE;
  1354. chg_config.dst_width = DMA_MGR_TRANSFER_WIDTH_BYTE;
  1355. /* spi tx dma config */
  1356. stat = dma_mgr_request_resource(&spi->tx_dma);
  1357. if (stat != status_success)
  1358. {
  1359. LOG_E("[spi%d]tx dma request resource failed\n", i);
  1360. return -RT_ERROR;
  1361. }
  1362. chg_config.src_mode = DMA_MGR_HANDSHAKE_MODE_NORMAL;
  1363. chg_config.src_addr_ctrl = DMA_MGR_ADDRESS_CONTROL_INCREMENT;
  1364. chg_config.dst_mode = DMA_MGR_HANDSHAKE_MODE_HANDSHAKE;
  1365. chg_config.dst_addr_ctrl = DMA_MGR_ADDRESS_CONTROL_FIXED;
  1366. chg_config.dst_addr = (uint32_t)&spi->spi_base->DATA;
  1367. chg_config.en_dmamux = true;
  1368. chg_config.dmamux_src = spi->tx_dmamux;
  1369. dma_mgr_setup_channel(&spi->tx_dma, &chg_config);
  1370. dma_mgr_install_chn_tc_callback(&spi->tx_dma, spi_dma_channel_tc_callback, (void *)&hpm_spis[i]);
  1371. /* spi rx dma config */
  1372. stat = dma_mgr_request_resource(&spi->rx_dma);
  1373. if (stat != status_success) {
  1374. LOG_E("[spi%d]rx dma request resource failed\n", i);
  1375. return -RT_ERROR;
  1376. }
  1377. chg_config.src_mode = DMA_MGR_HANDSHAKE_MODE_HANDSHAKE;
  1378. chg_config.src_addr_ctrl = DMA_MGR_ADDRESS_CONTROL_FIXED;
  1379. chg_config.src_addr = (uint32_t)&spi->spi_base->DATA;
  1380. chg_config.dst_mode = DMA_MGR_HANDSHAKE_MODE_NORMAL;
  1381. chg_config.dst_addr_ctrl = DMA_MGR_ADDRESS_CONTROL_INCREMENT;
  1382. chg_config.en_dmamux = true;
  1383. chg_config.dmamux_src = spi->rx_dmamux;
  1384. dma_mgr_setup_channel(&spi->rx_dma, &chg_config);
  1385. dma_mgr_install_chn_tc_callback(&spi->rx_dma, spi_dma_channel_tc_callback, (void *)&hpm_spis[i]);
  1386. intc_m_enable_irq_with_priority(hpm_spis[i].spi_irq, hpm_spis[i].spi_irq_priority);
  1387. dma_mgr_enable_dma_irq_with_priority(&spi->tx_dma, 1);
  1388. dma_mgr_enable_dma_irq_with_priority(&spi->rx_dma, 1);
  1389. }
  1390. #ifdef RT_USING_QSPI
  1391. ret = rt_qspi_bus_register(&spi->spi_bus, spi->bus_name, &hpm_spi_ops);
  1392. #else
  1393. ret = rt_spi_bus_register(&spi->spi_bus, spi->bus_name, &hpm_spi_ops);
  1394. #endif
  1395. if (ret != RT_EOK)
  1396. {
  1397. break;
  1398. }
  1399. char sem_name[RT_NAME_MAX];
  1400. rt_sprintf(sem_name, "%s_s", hpm_spis[i].bus_name);
  1401. hpm_spis[i].xfer_sem = rt_sem_create(sem_name, 0, RT_IPC_FLAG_PRIO);
  1402. if (hpm_spis[i].xfer_sem == RT_NULL)
  1403. {
  1404. ret = RT_ENOMEM;
  1405. break;
  1406. }
  1407. rt_sprintf(sem_name, "%s_ds", hpm_spis[i].bus_name);
  1408. hpm_spis[i].spi_xfer_done_sem = rt_sem_create(sem_name, 0, RT_IPC_FLAG_PRIO);
  1409. if (hpm_spis[i].spi_xfer_done_sem == RT_NULL)
  1410. {
  1411. ret = RT_ENOMEM;
  1412. break;
  1413. }
  1414. rt_sprintf(sem_name, "%s_rds", hpm_spis[i].bus_name);
  1415. hpm_spis[i].rxdma_xfer_done_sem = rt_sem_create(sem_name, 0, RT_IPC_FLAG_PRIO);
  1416. if (hpm_spis[i].rxdma_xfer_done_sem == RT_NULL)
  1417. {
  1418. ret = RT_ENOMEM;
  1419. break;
  1420. }
  1421. rt_sprintf(sem_name, "%s_tds", hpm_spis[i].bus_name);
  1422. hpm_spis[i].txdma_xfer_done_sem = rt_sem_create(sem_name, 0, RT_IPC_FLAG_PRIO);
  1423. if (hpm_spis[i].txdma_xfer_done_sem == RT_NULL)
  1424. {
  1425. ret = RT_ENOMEM;
  1426. break;
  1427. }
  1428. }
  1429. return ret;
  1430. }
  1431. INIT_BOARD_EXPORT(rt_hw_spi_init);
  1432. #endif /*BSP_USING_SPI*/