spi_slave_hd_hal.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444
  1. /*
  2. * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. // The HAL layer for SPI Slave HD
  7. #include <string.h>
  8. #include "esp_types.h"
  9. #include "esp_attr.h"
  10. #include "esp_err.h"
  11. #include "sdkconfig.h"
  12. #include "soc/spi_periph.h"
  13. #include "soc/lldesc.h"
  14. #include "soc/soc_caps.h"
  15. #include "soc/ext_mem_defs.h"
  16. #include "hal/spi_slave_hd_hal.h"
  17. #include "hal/assert.h"
  18. //This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
  19. #if SOC_GDMA_SUPPORTED
  20. #if (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AHB) && (SOC_AHB_GDMA_VERSION == 1)
  21. #include "soc/gdma_struct.h"
  22. #include "hal/gdma_ll.h"
  23. #define spi_dma_ll_tx_restart(dev, chan) gdma_ll_tx_restart(&GDMA, chan)
  24. #define spi_dma_ll_rx_restart(dev, chan) gdma_ll_rx_restart(&GDMA, chan)
  25. #define spi_dma_ll_rx_reset(dev, chan) gdma_ll_rx_reset_channel(&GDMA, chan)
  26. #define spi_dma_ll_tx_reset(dev, chan) gdma_ll_tx_reset_channel(&GDMA, chan)
  27. #define spi_dma_ll_rx_enable_burst_data(dev, chan, enable) gdma_ll_rx_enable_data_burst(&GDMA, chan, enable)
  28. #define spi_dma_ll_tx_enable_burst_data(dev, chan, enable) gdma_ll_tx_enable_data_burst(&GDMA, chan, enable)
  29. #define spi_dma_ll_rx_enable_burst_desc(dev, chan, enable) gdma_ll_rx_enable_descriptor_burst(&GDMA, chan, enable)
  30. #define spi_dma_ll_tx_enable_burst_desc(dev, chan, enable) gdma_ll_tx_enable_descriptor_burst(&GDMA, chan, enable)
  31. #define spi_dma_ll_enable_out_auto_wrback(dev, chan, enable) gdma_ll_tx_enable_auto_write_back(&GDMA, chan, enable)
  32. #define spi_dma_ll_set_out_eof_generation(dev, chan, enable) gdma_ll_tx_set_eof_mode(&GDMA, chan, enable)
  33. #define spi_dma_ll_get_out_eof_desc_addr(dev, chan) gdma_ll_tx_get_eof_desc_addr(&GDMA, chan)
  34. #define spi_dma_ll_get_in_suc_eof_desc_addr(dev, chan) gdma_ll_rx_get_success_eof_desc_addr(&GDMA, chan)
  35. #define spi_dma_ll_rx_start(dev, chan, addr) do {\
  36. gdma_ll_rx_set_desc_addr(&GDMA, chan, (uint32_t)addr);\
  37. gdma_ll_rx_start(&GDMA, chan);\
  38. } while (0)
  39. #define spi_dma_ll_tx_start(dev, chan, addr) do {\
  40. gdma_ll_tx_set_desc_addr(&GDMA, chan, (uint32_t)addr);\
  41. gdma_ll_tx_start(&GDMA, chan);\
  42. } while (0)
  43. #elif (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AXI)
  44. #include "hal/axi_dma_ll.h"
  45. #define spi_dma_ll_tx_restart(dev, chan) axi_dma_ll_tx_restart(&AXI_DMA, chan)
  46. #define spi_dma_ll_rx_restart(dev, chan) axi_dma_ll_rx_restart(&AXI_DMA, chan)
  47. #define spi_dma_ll_rx_reset(dev, chan) axi_dma_ll_rx_reset_channel(&AXI_DMA, chan)
  48. #define spi_dma_ll_tx_reset(dev, chan) axi_dma_ll_tx_reset_channel(&AXI_DMA, chan)
  49. #define spi_dma_ll_rx_enable_burst_data(dev, chan, enable) axi_dma_ll_rx_enable_data_burst(&AXI_DMA, chan, enable)
  50. #define spi_dma_ll_tx_enable_burst_data(dev, chan, enable) axi_dma_ll_tx_enable_data_burst(&AXI_DMA, chan, enable)
  51. #define spi_dma_ll_rx_enable_burst_desc(dev, chan, enable) axi_dma_ll_rx_enable_descriptor_burst(&AXI_DMA, chan, enable)
  52. #define spi_dma_ll_tx_enable_burst_desc(dev, chan, enable) axi_dma_ll_tx_enable_descriptor_burst(&AXI_DMA, chan, enable)
  53. #define spi_dma_ll_enable_out_auto_wrback(dev, chan, enable) axi_dma_ll_tx_enable_auto_write_back(&AXI_DMA, chan, enable)
  54. #define spi_dma_ll_set_out_eof_generation(dev, chan, enable) axi_dma_ll_tx_set_eof_mode(&AXI_DMA, chan, enable)
  55. #define spi_dma_ll_get_out_eof_desc_addr(dev, chan) axi_dma_ll_tx_get_eof_desc_addr(&AXI_DMA, chan)
  56. #define spi_dma_ll_get_in_suc_eof_desc_addr(dev, chan) axi_dma_ll_rx_get_success_eof_desc_addr(&AXI_DMA, chan)
  57. #define spi_dma_ll_rx_start(dev, chan, addr) do {\
  58. axi_dma_ll_rx_set_desc_addr(&AXI_DMA, chan, (uint32_t)addr);\
  59. axi_dma_ll_rx_start(&AXI_DMA, chan);\
  60. } while (0)
  61. #define spi_dma_ll_tx_start(dev, chan, addr) do {\
  62. axi_dma_ll_tx_set_desc_addr(&AXI_DMA, chan, (uint32_t)addr);\
  63. axi_dma_ll_tx_start(&AXI_DMA, chan);\
  64. } while (0)
  65. #endif
  66. #endif //SOC_GDMA_SUPPORTED
/**
 * Apply the default DMA channel configuration used by this HAL:
 * burst mode for both data and descriptor fetches on RX and TX,
 * automatic write-back of finished TX descriptors, and EOF generation
 * on the TX channel so send completion can be detected.
 */
static void s_spi_slave_hd_hal_dma_init_config(const spi_slave_hd_hal_context_t *hal)
{
    spi_dma_ll_rx_enable_burst_data(hal->dma_in, hal->rx_dma_chan, 1);
    spi_dma_ll_tx_enable_burst_data(hal->dma_out, hal->tx_dma_chan, 1);
    spi_dma_ll_rx_enable_burst_desc(hal->dma_in, hal->rx_dma_chan, 1);
    spi_dma_ll_tx_enable_burst_desc(hal->dma_out, hal->tx_dma_chan, 1);
    spi_dma_ll_enable_out_auto_wrback(hal->dma_out, hal->tx_dma_chan, 1);
    spi_dma_ll_set_out_eof_generation(hal->dma_out, hal->tx_dma_chan, 1);
}
/**
 * Initialize the SPI Slave HD HAL context and the peripheral.
 *
 * Copies the DMA handles/channels from the config into the context, resets
 * the descriptor ring pointers, then programs the hardware: bit lengths,
 * bit order, SPI mode, interrupt enables and the length-update conditions.
 *
 * @param hal        Context of the HAL layer (dmadesc_tx/rx and dma_desc_num
 *                   are read here — NOTE(review): presumably filled in by the
 *                   caller before this call; confirm against the driver)
 * @param hal_config Configuration of the HAL
 */
void spi_slave_hd_hal_init(spi_slave_hd_hal_context_t *hal, const spi_slave_hd_hal_config_t *hal_config)
{
    spi_dev_t *hw = SPI_LL_GET_HW(hal_config->host_id);
    hal->dev = hw;
    hal->dma_in = hal_config->dma_in;
    hal->dma_out = hal_config->dma_out;
    hal->dma_enabled = hal_config->dma_enabled;
    hal->tx_dma_chan = hal_config->tx_dma_chan;
    hal->rx_dma_chan = hal_config->rx_dma_chan;
    hal->append_mode = hal_config->append_mode;
    hal->tx_cur_desc = hal->dmadesc_tx;
    hal->rx_cur_desc = hal->dmadesc_rx;
    //Heads start one slot "behind" the ring so the first finished descriptor
    //is found by advancing the head pointer (see *_get_*_finished_trans).
    hal->tx_dma_head = hal->dmadesc_tx + hal->dma_desc_num -1;
    hal->rx_dma_head = hal->dmadesc_rx + hal->dma_desc_num -1;

    //Configure slave
    if (hal_config->dma_enabled) {
        s_spi_slave_hd_hal_dma_init_config(hal);
    }

    spi_ll_slave_hd_init(hw);
    spi_ll_set_addr_bitlen(hw, hal_config->address_bits);
    spi_ll_set_command_bitlen(hw, hal_config->command_bits);
    spi_ll_set_dummy(hw, hal_config->dummy_bits);
    spi_ll_set_rx_lsbfirst(hw, hal_config->rx_lsbfirst);
    spi_ll_set_tx_lsbfirst(hw, hal_config->tx_lsbfirst);
    spi_ll_slave_set_mode(hw, hal_config->mode, (hal_config->dma_enabled));

    spi_ll_disable_intr(hw, UINT32_MAX);
    spi_ll_clear_intr(hw, UINT32_MAX);
    if (!hal_config->append_mode) {
        //Segment mode: CMD7 = receive done, CMD8 = send done.
        spi_ll_set_intr(hw, SPI_LL_INTR_CMD7 | SPI_LL_INTR_CMD8);

        //Probe whether the raw bits could actually be set by software; on
        //parts where they are not writable, fall back to trans_done instead.
        bool workaround_required = false;
        if (!spi_ll_get_intr(hw, SPI_LL_INTR_CMD7)) {
            hal->intr_not_triggered |= SPI_EV_RECV;
            workaround_required = true;
        }
        if (!spi_ll_get_intr(hw, SPI_LL_INTR_CMD8)) {
            hal->intr_not_triggered |= SPI_EV_SEND;
            workaround_required = true;
        }

        if (workaround_required) {
            //Workaround if the previous interrupts are not writable
            spi_ll_set_intr(hw, SPI_LL_INTR_TRANS_DONE);
        }
    } else {
#if SOC_GDMA_SUPPORTED
        spi_ll_enable_intr(hw, SPI_LL_INTR_CMD7);
#else
        //Without GDMA the TX completion comes from the legacy DMA out-EOF.
        spi_ll_clear_intr(hw, SPI_LL_INTR_OUT_EOF | SPI_LL_INTR_CMD7);
        spi_ll_enable_intr(hw, SPI_LL_INTR_OUT_EOF | SPI_LL_INTR_CMD7);
#endif //SOC_GDMA_SUPPORTED
    }

    //Update the transaction length counter on every buffer/DMA read or write.
    spi_ll_slave_hd_set_len_cond(hw, SPI_LL_TRANS_LEN_COND_WRBUF |
                                     SPI_LL_TRANS_LEN_COND_WRDMA |
                                     SPI_LL_TRANS_LEN_COND_RDBUF |
                                     SPI_LL_TRANS_LEN_COND_RDDMA);

    spi_ll_slave_set_seg_mode(hal->dev, true);
}
#if SOC_NON_CACHEABLE_OFFSET
//Translate a pointer between the cached (CPU) and non-cacheable (DMA) views
//of the same physical memory.
//NOTE(review): the offset is hard-coded as 0x40000000 — presumably it equals
//SOC_NON_CACHEABLE_OFFSET on the targets where this branch is taken; confirm.
#define ADDR_DMA_2_CPU(addr) ((typeof(addr))((uint32_t)(addr) + 0x40000000))
#define ADDR_CPU_2_DMA(addr) ((typeof(addr))((uint32_t)(addr) - 0x40000000))
#else
//No non-cacheable alias on this target: both views are the same address.
#define ADDR_DMA_2_CPU(addr) (addr)
#define ADDR_CPU_2_DMA(addr) (addr)
#endif
  139. static void s_spi_hal_dma_desc_setup_link(spi_dma_desc_t *dmadesc, const void *data, int len, bool is_rx)
  140. {
  141. dmadesc = ADDR_DMA_2_CPU(dmadesc);
  142. int n = 0;
  143. while (len) {
  144. int dmachunklen = len;
  145. if (dmachunklen > DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED) {
  146. dmachunklen = DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
  147. }
  148. if (is_rx) {
  149. //Receive needs DMA length rounded to next 32-bit boundary
  150. dmadesc[n].dw0.size = (dmachunklen + 3) & (~3);
  151. dmadesc[n].dw0.length = (dmachunklen + 3) & (~3);
  152. } else {
  153. dmadesc[n].dw0.size = dmachunklen;
  154. dmadesc[n].dw0.length = dmachunklen;
  155. }
  156. dmadesc[n].buffer = (uint8_t *)data;
  157. dmadesc[n].dw0.suc_eof = 0;
  158. dmadesc[n].dw0.owner = 1;
  159. dmadesc[n].next = ADDR_CPU_2_DMA(&dmadesc[n + 1]);
  160. len -= dmachunklen;
  161. data += dmachunklen;
  162. n++;
  163. }
  164. dmadesc[n - 1].dw0.suc_eof = 1; //Mark last DMA desc as end of stream.
  165. dmadesc[n - 1].next = NULL;
  166. }
/**
 * Walk a descriptor chain (given by its DMA-side address) up to and including
 * the first EOF descriptor, summing the `length` fields.
 *
 * @param head          DMA-side address of the first descriptor
 * @param[out] out_next If not NULL, receives the DMA-side address of the
 *                      descriptor after the EOF one (NULL at end of chain)
 * @param[out] out_buff_head If not NULL, receives the buffer address of the
 *                      FIRST descriptor only (head of the user buffer)
 * @return Sum of the lengths of the walked descriptors, in bytes
 */
static int s_desc_get_received_len_addr(spi_dma_desc_t* head, spi_dma_desc_t** out_next, void **out_buff_head)
{
    spi_dma_desc_t* desc_cpu = ADDR_DMA_2_CPU(head);
    int len = 0;
    if (out_buff_head) {
        *out_buff_head = desc_cpu->buffer;
    }
    while(head) {
        len += desc_cpu->dw0.length;
        bool eof = desc_cpu->dw0.suc_eof;
        //Advance both views in lockstep: desc_cpu for reading fields,
        //head for the DMA-side address returned via out_next.
        //NOTE(review): head->next dereferences the DMA-side alias directly —
        //presumably readable by the CPU on all supported targets; confirm.
        desc_cpu = ADDR_DMA_2_CPU(desc_cpu->next);
        head = head->next;
        if (eof) break;
    }
    if (out_next) {
        *out_next = head;
    }
    return len;
}
/**
 * (Segment mode) Arm the RX DMA for one receiving transaction.
 *
 * Builds the descriptor chain over `out_buf`, resets the SPI RX FIFO, the
 * DMA channel and the slave state machine, clears the stale recv-done (CMD7)
 * interrupt, then enables DMA receive and starts the channel.
 *
 * @param hal     Context of the HAL layer
 * @param out_buf Buffer to receive into (DMA-capable)
 * @param len     Maximum length to receive, in bytes
 */
void spi_slave_hd_hal_rxdma(spi_slave_hd_hal_context_t *hal, uint8_t *out_buf, size_t len)
{
    s_spi_hal_dma_desc_setup_link(hal->dmadesc_rx->desc, out_buf, len, true);

    //Reset peripheral and DMA state before (re)arming the channel.
    spi_ll_dma_rx_fifo_reset(hal->dev);
    spi_dma_ll_rx_reset(hal->dma_in, hal->rx_dma_chan);
    spi_ll_slave_reset(hal->dev);
    spi_ll_infifo_full_clr(hal->dev);
    spi_ll_clear_intr(hal->dev, SPI_LL_INTR_CMD7);  //CMD7: recv done

    spi_ll_dma_rx_enable(hal->dev, 1);
    spi_dma_ll_rx_start(hal->dma_in, hal->rx_dma_chan, (lldesc_t *)hal->dmadesc_rx->desc);
}
/**
 * (Segment mode) Arm the TX DMA for one sending transaction.
 *
 * Builds the descriptor chain over `data`, resets the SPI TX FIFO, the DMA
 * channel and the slave state machine, clears the stale send-done (CMD8)
 * interrupt, then enables DMA send and starts the channel.
 *
 * @param hal  Context of the HAL layer
 * @param data Buffer to send (DMA-capable)
 * @param len  Length to send, in bytes
 */
void spi_slave_hd_hal_txdma(spi_slave_hd_hal_context_t *hal, uint8_t *data, size_t len)
{
    s_spi_hal_dma_desc_setup_link(hal->dmadesc_tx->desc, data, len, false);

    //Reset peripheral and DMA state before (re)arming the channel.
    spi_ll_dma_tx_fifo_reset(hal->dev);
    spi_dma_ll_tx_reset(hal->dma_out, hal->tx_dma_chan);
    spi_ll_slave_reset(hal->dev);
    spi_ll_outfifo_empty_clr(hal->dev);
    spi_ll_clear_intr(hal->dev, SPI_LL_INTR_CMD8);  //CMD8: send done

    spi_ll_dma_tx_enable(hal->dev, 1);
    spi_dma_ll_tx_start(hal->dma_out, hal->tx_dma_chan, (lldesc_t *)hal->dmadesc_tx->desc);
}
  208. static spi_ll_intr_t get_event_intr(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
  209. {
  210. spi_ll_intr_t intr = 0;
  211. #if CONFIG_IDF_TARGET_ESP32S2
  212. if ((ev & SPI_EV_SEND) && hal->append_mode) intr |= SPI_LL_INTR_OUT_EOF;
  213. #endif
  214. if ((ev & SPI_EV_SEND) && !hal->append_mode) intr |= SPI_LL_INTR_CMD8;
  215. if (ev & SPI_EV_RECV) intr |= SPI_LL_INTR_CMD7;
  216. if (ev & SPI_EV_BUF_TX) intr |= SPI_LL_INTR_RDBUF;
  217. if (ev & SPI_EV_BUF_RX) intr |= SPI_LL_INTR_WRBUF;
  218. if (ev & SPI_EV_CMD9) intr |= SPI_LL_INTR_CMD9;
  219. if (ev & SPI_EV_CMDA) intr |= SPI_LL_INTR_CMDA;
  220. if (ev & SPI_EV_TRANS) intr |= SPI_LL_INTR_TRANS_DONE;
  221. return intr;
  222. }
  223. bool spi_slave_hd_hal_check_clear_event(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
  224. {
  225. spi_ll_intr_t intr = get_event_intr(hal, ev);
  226. if (spi_ll_get_intr(hal->dev, intr)) {
  227. spi_ll_clear_intr(hal->dev, intr);
  228. return true;
  229. }
  230. return false;
  231. }
/**
 * Check whether the interrupt for `ev` is raised; if so, disable it.
 * Also maintains the workaround for chips where the CMD7/CMD8 raw bits are
 * not software-writable (detected in spi_slave_hd_hal_init): once the real
 * interrupt finally fires, the event is removed from `intr_not_triggered`
 * and the auxiliary trans_done interrupt is disabled again.
 *
 * @return true if the event's interrupt was raised (and has been disabled)
 */
bool spi_slave_hd_hal_check_disable_event(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
{
    //The trans_done interrupt is used for the workaround when some interrupt is not writable
    spi_ll_intr_t intr = get_event_intr(hal, ev);

    // Workaround for these interrupts not writable
    uint32_t missing_intr = hal->intr_not_triggered & ev;
    if (missing_intr) {
        //The real interrupt fired at last: stop tracking it as "missing".
        if ((missing_intr & SPI_EV_RECV) && spi_ll_get_intr(hal->dev, SPI_LL_INTR_CMD7)) {
            hal->intr_not_triggered &= ~SPI_EV_RECV;
        }
        if ((missing_intr & SPI_EV_SEND) && spi_ll_get_intr(hal->dev, SPI_LL_INTR_CMD8)) {
            hal->intr_not_triggered &= ~SPI_EV_SEND;
        }
        //The auxiliary trans_done was only needed while the real one was missing.
        if (spi_ll_get_intr(hal->dev, SPI_LL_INTR_TRANS_DONE)) {
            spi_ll_disable_intr(hal->dev, SPI_LL_INTR_TRANS_DONE);
        }
    }

    if (spi_ll_get_intr(hal->dev, intr)) {
        spi_ll_disable_intr(hal->dev, intr);
        return true;
    }
    return false;
}
  255. void spi_slave_hd_hal_enable_event_intr(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
  256. {
  257. spi_ll_intr_t intr = get_event_intr(hal, ev);
  258. spi_ll_enable_intr(hal->dev, intr);
  259. }
  260. void spi_slave_hd_hal_invoke_event_intr(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
  261. {
  262. spi_ll_intr_t intr = get_event_intr(hal, ev);
  263. // Workaround for these interrupts not writable
  264. if (hal->intr_not_triggered & ev & (SPI_EV_RECV | SPI_EV_SEND)) {
  265. intr |= SPI_LL_INTR_TRANS_DONE;
  266. }
  267. spi_ll_enable_intr(hal->dev, intr);
  268. }
/**
 * Read `len` bytes from the shared register buffer, starting at `addr`,
 * into `out_data`.
 */
void spi_slave_hd_hal_read_buffer(spi_slave_hd_hal_context_t *hal, int addr, uint8_t *out_data, size_t len)
{
    spi_ll_read_buffer_byte(hal->dev, addr, out_data, len);
}
/**
 * Write `len` bytes from `data` into the shared register buffer, starting
 * at `addr`.
 */
void spi_slave_hd_hal_write_buffer(spi_slave_hd_hal_context_t *hal, int addr, uint8_t *data, size_t len)
{
    spi_ll_write_buffer_byte(hal->dev, addr, data, len);
}
/**
 * @return The address field of the last transaction seen by the slave.
 */
int spi_slave_hd_hal_get_last_addr(spi_slave_hd_hal_context_t *hal)
{
    return spi_ll_slave_hd_get_last_addr(hal->dev);
}
/**
 * @return The number of bytes received in the last transaction.
 */
int spi_slave_hd_hal_get_rxlen(spi_slave_hd_hal_context_t *hal)
{
    //The hardware counter is read back in bytes.
    return spi_ll_slave_get_rx_byte_len(hal->dev);
}
  286. int spi_slave_hd_hal_rxdma_seg_get_len(spi_slave_hd_hal_context_t *hal)
  287. {
  288. spi_dma_desc_t *desc = hal->dmadesc_rx->desc;
  289. return s_desc_get_received_len_addr(desc, NULL, NULL);
  290. }
/**
 * (Append mode) Check whether a queued TX descriptor chain has completed,
 * and if so recycle it.
 *
 * Compares the DMA's last out-EOF descriptor address with the descriptor the
 * head pointer currently rests on: equality means no new completion since the
 * last call. Otherwise the head advances one slot (wrapping around the ring)
 * and the paired transaction info is returned.
 *
 * @param[out] out_trans      Driver transaction handle stored in `arg`
 * @param[out] real_buff_addr Buffer address of the finished descriptor chain
 * @return true if a finished transaction was recycled
 */
bool spi_slave_hd_hal_get_tx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans, void **real_buff_addr)
{
    uint32_t desc_now = spi_dma_ll_get_out_eof_desc_addr(hal->dma_out, hal->tx_dma_chan);
    if ((uint32_t)hal->tx_dma_head->desc == desc_now) {
        return false;
    }

    //find used paired desc-trans by desc addr
    hal->tx_dma_head++;
    if (hal->tx_dma_head >= hal->dmadesc_tx + hal->dma_desc_num) {
        hal->tx_dma_head = hal->dmadesc_tx;  //wrap around the ring
    }
    *out_trans = hal->tx_dma_head->arg;
    s_desc_get_received_len_addr(hal->tx_dma_head->desc, NULL, real_buff_addr);
    hal->tx_recycled_desc_cnt++;
    return true;
}
/**
 * (Append mode) Check whether a queued RX descriptor chain has completed,
 * and if so recycle it.
 *
 * Same ring-head scheme as the TX variant, but keyed on the DMA's last
 * in-success-EOF descriptor address, and additionally returns the number of
 * bytes actually received.
 *
 * @param[out] out_trans      Driver transaction handle stored in `arg`
 * @param[out] real_buff_addr Buffer address of the finished descriptor chain
 * @param[out] out_len        Number of bytes received into the buffer
 * @return true if a finished transaction was recycled
 */
bool spi_slave_hd_hal_get_rx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans, void **real_buff_addr, size_t *out_len)
{
    uint32_t desc_now = spi_dma_ll_get_in_suc_eof_desc_addr(hal->dma_in, hal->rx_dma_chan);
    if ((uint32_t)hal->rx_dma_head->desc == desc_now) {
        return false;
    }

    //find used paired desc-trans by desc addr
    hal->rx_dma_head++;
    if (hal->rx_dma_head >= hal->dmadesc_rx + hal->dma_desc_num) {
        hal->rx_dma_head = hal->dmadesc_rx;  //wrap around the ring
    }
    *out_trans = hal->rx_dma_head->arg;
    *out_len = s_desc_get_received_len_addr(hal->rx_dma_head->desc, NULL, real_buff_addr);
    hal->rx_recycled_desc_cnt++;
    return true;
}
  323. esp_err_t spi_slave_hd_hal_txdma_append(spi_slave_hd_hal_context_t *hal, uint8_t *data, size_t len, void *arg)
  324. {
  325. //Check if there are enough available DMA descriptors for software to use
  326. int num_required = (len + LLDESC_MAX_NUM_PER_DESC - 1) / LLDESC_MAX_NUM_PER_DESC;
  327. int not_recycled_desc_num = hal->tx_used_desc_cnt - hal->tx_recycled_desc_cnt;
  328. int available_desc_num = hal->dma_desc_num - not_recycled_desc_num;
  329. if (num_required > available_desc_num) {
  330. return ESP_ERR_INVALID_STATE;
  331. }
  332. s_spi_hal_dma_desc_setup_link(hal->tx_cur_desc->desc, data, len, false);
  333. hal->tx_cur_desc->arg = arg;
  334. if (!hal->tx_dma_started) {
  335. hal->tx_dma_started = true;
  336. //start a link
  337. hal->tx_dma_tail = hal->tx_cur_desc;
  338. spi_ll_dma_tx_fifo_reset(hal->dma_out);
  339. spi_ll_outfifo_empty_clr(hal->dev);
  340. spi_dma_ll_tx_reset(hal->dma_out, hal->tx_dma_chan);
  341. spi_ll_dma_tx_enable(hal->dev, 1);
  342. spi_dma_ll_tx_start(hal->dma_out, hal->tx_dma_chan, (lldesc_t *)hal->tx_cur_desc->desc);
  343. } else {
  344. //there is already a consecutive link
  345. ADDR_DMA_2_CPU(hal->tx_dma_tail->desc)->next = hal->tx_cur_desc->desc;
  346. hal->tx_dma_tail = hal->tx_cur_desc;
  347. spi_dma_ll_tx_restart(hal->dma_out, hal->tx_dma_chan);
  348. }
  349. //Move the current descriptor pointer according to the number of the linked descriptors
  350. for (int i = 0; i < num_required; i++) {
  351. hal->tx_used_desc_cnt++;
  352. hal->tx_cur_desc++;
  353. if (hal->tx_cur_desc == hal->dmadesc_tx + hal->dma_desc_num) {
  354. hal->tx_cur_desc = hal->dmadesc_tx;
  355. }
  356. }
  357. return ESP_OK;
  358. }
  359. esp_err_t spi_slave_hd_hal_rxdma_append(spi_slave_hd_hal_context_t *hal, uint8_t *data, size_t len, void *arg)
  360. {
  361. //Check if there are enough available dma descriptors for software to use
  362. int num_required = (len + LLDESC_MAX_NUM_PER_DESC - 1) / LLDESC_MAX_NUM_PER_DESC;
  363. int not_recycled_desc_num = hal->rx_used_desc_cnt - hal->rx_recycled_desc_cnt;
  364. int available_desc_num = hal->dma_desc_num - not_recycled_desc_num;
  365. if (num_required > available_desc_num) {
  366. return ESP_ERR_INVALID_STATE;
  367. }
  368. s_spi_hal_dma_desc_setup_link(hal->rx_cur_desc->desc, data, len, false);
  369. hal->rx_cur_desc->arg = arg;
  370. if (!hal->rx_dma_started) {
  371. hal->rx_dma_started = true;
  372. //start a link
  373. hal->rx_dma_tail = hal->rx_cur_desc;
  374. spi_dma_ll_rx_reset(hal->dma_in, hal->rx_dma_chan);
  375. spi_ll_dma_rx_fifo_reset(hal->dma_in);
  376. spi_ll_infifo_full_clr(hal->dev);
  377. spi_ll_dma_rx_enable(hal->dev, 1);
  378. spi_dma_ll_rx_start(hal->dma_in, hal->rx_dma_chan, (lldesc_t *)hal->rx_cur_desc->desc);
  379. } else {
  380. //there is already a consecutive link
  381. ADDR_DMA_2_CPU(hal->rx_dma_tail->desc)->next = hal->rx_cur_desc->desc;
  382. hal->rx_dma_tail = hal->rx_cur_desc;
  383. spi_dma_ll_rx_restart(hal->dma_in, hal->rx_dma_chan);
  384. }
  385. //Move the current descriptor pointer according to the number of the linked descriptors
  386. for (int i = 0; i < num_required; i++) {
  387. hal->rx_used_desc_cnt++;
  388. hal->rx_cur_desc++;
  389. if (hal->rx_cur_desc == hal->dmadesc_rx + hal->dma_desc_num) {
  390. hal->rx_cur_desc = hal->dmadesc_rx;
  391. }
  392. }
  393. return ESP_OK;
  394. }