spi_slave_hal_iram.c
#include "hal/spi_slave_hal.h"
#include "hal/spi_ll.h"
#include "soc/ext_mem_defs.h"
#include "soc/soc_caps.h"

//This GDMA related part will be covered by dedicated GDMA APIs in the future. Here we temporarily use macros.
#if SOC_GDMA_SUPPORTED
#if (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AHB) && (SOC_AHB_GDMA_VERSION == 1)
#include "soc/gdma_struct.h"
#include "hal/gdma_ll.h"
#define spi_dma_ll_rx_reset(dev, chan)          gdma_ll_rx_reset_channel(&GDMA, chan)
#define spi_dma_ll_tx_reset(dev, chan)          gdma_ll_tx_reset_channel(&GDMA, chan)
#define spi_dma_ll_rx_start(dev, chan, addr) do {\
            gdma_ll_rx_set_desc_addr(&GDMA, chan, (uint32_t)addr);\
            gdma_ll_rx_start(&GDMA, chan);\
        } while (0)
#define spi_dma_ll_tx_start(dev, chan, addr) do {\
            gdma_ll_tx_set_desc_addr(&GDMA, chan, (uint32_t)addr);\
            gdma_ll_tx_start(&GDMA, chan);\
        } while (0)
#elif (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AXI) //TODO: IDF-6152, refactor spi hal layer
#include "hal/axi_dma_ll.h"
#define spi_dma_ll_rx_reset(dev, chan)          axi_dma_ll_rx_reset_channel(&AXI_DMA, chan)
#define spi_dma_ll_tx_reset(dev, chan)          axi_dma_ll_tx_reset_channel(&AXI_DMA, chan)
#define spi_dma_ll_rx_start(dev, chan, addr) do {\
            axi_dma_ll_rx_set_desc_addr(&AXI_DMA, chan, (uint32_t)addr);\
            axi_dma_ll_rx_start(&AXI_DMA, chan);\
        } while (0)
#define spi_dma_ll_tx_start(dev, chan, addr) do {\
            axi_dma_ll_tx_set_desc_addr(&AXI_DMA, chan, (uint32_t)addr);\
            axi_dma_ll_tx_start(&AXI_DMA, chan);\
        } while (0)
#endif
#endif  //SOC_GDMA_SUPPORTED

bool spi_slave_hal_usr_is_done(spi_slave_hal_context_t* hal)
{
    return spi_ll_usr_is_done(hal->hw);
}

void spi_slave_hal_user_start(const spi_slave_hal_context_t *hal)
{
    spi_ll_clear_int_stat(hal->hw); //clear int bit
    spi_ll_user_start(hal->hw);
}
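
//On targets with a non-cacheable alias for internal memory (SOC_NON_CACHEABLE_OFFSET), the CPU accesses
//the DMA descriptors through that alias while the DMA engine uses the original address; these helpers
//convert a descriptor pointer between the CPU view and the DMA view.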
#if SOC_NON_CACHEABLE_OFFSET
#define ADDR_DMA_2_CPU(addr)   ((typeof(addr))((uint32_t)(addr) + SOC_NON_CACHEABLE_OFFSET))
#define ADDR_CPU_2_DMA(addr)   ((typeof(addr))((uint32_t)(addr) - SOC_NON_CACHEABLE_OFFSET))
#else
#define ADDR_DMA_2_CPU(addr)   (addr)
#define ADDR_CPU_2_DMA(addr)   (addr)
#endif
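
//Chop the transfer buffer into a chain of DMA descriptors: each descriptor covers at most
//DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED bytes, RX sizes are rounded up to a 32-bit boundary,
//ownership is handed to the DMA engine, and the last descriptor is marked as end of stream.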
static void s_spi_slave_hal_dma_desc_setup_link(spi_dma_desc_t *dmadesc, const void *data, int len, bool is_rx)
{
    dmadesc = ADDR_DMA_2_CPU(dmadesc);
    int n = 0;
    while (len) {
        int dmachunklen = len;
        if (dmachunklen > DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED) {
            dmachunklen = DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
        }
        if (is_rx) {
            //Receive needs DMA length rounded up to the next 32-bit boundary
            dmadesc[n].dw0.size = (dmachunklen + 3) & (~3);
        } else {
            dmadesc[n].dw0.size = dmachunklen;
            dmadesc[n].dw0.length = dmachunklen;
        }
        dmadesc[n].buffer = (uint8_t *)data;
        dmadesc[n].dw0.suc_eof = 0;
        dmadesc[n].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
        dmadesc[n].next = ADDR_CPU_2_DMA(&dmadesc[n + 1]);
        len -= dmachunklen;
        data += dmachunklen;
        n++;
    }
    dmadesc[n - 1].dw0.suc_eof = 1; //Mark last DMA desc as end of stream.
    dmadesc[n - 1].next = NULL;
}
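
//Load the next transaction into the peripheral: in DMA mode, set up and start the RX/TX descriptor
//chains (resetting the DMA channels and the SPI state machine first); in CPU mode, copy the TX data
//into the internal data registers. Finally, program the transfer length in bits.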
void spi_slave_hal_prepare_data(const spi_slave_hal_context_t *hal)
{
    if (hal->use_dma) {
        //Fill DMA descriptors
        if (hal->rx_buffer) {
            s_spi_slave_hal_dma_desc_setup_link(hal->dmadesc_rx, hal->rx_buffer, ((hal->bitlen + 7) / 8), true);

            //reset dma inlink, this should be reset before spi related reset
            spi_dma_ll_rx_reset(hal->dma_in, hal->rx_dma_chan);
            spi_ll_dma_rx_fifo_reset(hal->dma_in);
            spi_ll_slave_reset(hal->hw);
            spi_ll_infifo_full_clr(hal->hw);
            spi_ll_dma_rx_enable(hal->hw, 1);
            spi_dma_ll_rx_start(hal->dma_in, hal->rx_dma_chan, (lldesc_t *)hal->dmadesc_rx);
        }
        if (hal->tx_buffer) {
            s_spi_slave_hal_dma_desc_setup_link(hal->dmadesc_tx, hal->tx_buffer, (hal->bitlen + 7) / 8, false);

            //reset dma outlink, this should be reset before spi related reset
            spi_dma_ll_tx_reset(hal->dma_out, hal->tx_dma_chan);
            spi_ll_dma_tx_fifo_reset(hal->dma_out);
            spi_ll_slave_reset(hal->hw);
            spi_ll_outfifo_empty_clr(hal->hw);
            spi_ll_dma_tx_enable(hal->hw, 1);
            spi_dma_ll_tx_start(hal->dma_out, hal->tx_dma_chan, (lldesc_t *)hal->dmadesc_tx);
        }
    } else {
        //No DMA. Turn off SPI and copy data to transmit buffers.
        if (hal->tx_buffer) {
            spi_ll_slave_reset(hal->hw);
            spi_ll_write_buffer(hal->hw, hal->tx_buffer, hal->bitlen);
        }
        spi_ll_cpu_tx_fifo_reset(hal->hw);
    }

    spi_ll_slave_set_rx_bitlen(hal->hw, hal->bitlen);
    spi_ll_slave_set_tx_bitlen(hal->hw, hal->bitlen);

#ifdef CONFIG_IDF_TARGET_ESP32
    //SPI Slave mode on ESP32 requires MOSI/MISO enable
    spi_ll_enable_mosi(hal->hw, (hal->rx_buffer == NULL) ? 0 : 1);
    spi_ll_enable_miso(hal->hw, (hal->tx_buffer == NULL) ? 0 : 1);
#endif
}
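
//Read back the result of the last transaction: record how many bits were actually clocked
//(see the note on slv_rdata_bit below) and, in CPU-driven mode, copy the received data out of
//the internal data registers into the user's RX buffer.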
void spi_slave_hal_store_result(spi_slave_hal_context_t *hal)
{
    //When all bits of cur_trans->length have been sent, slv_rdata_bit reads back as the
    //length sent minus one (i.e. cur_trans->length - 1); otherwise it reads back as the
    //number of bits actually sent.
    hal->rcv_bitlen = spi_ll_slave_get_rcv_bitlen(hal->hw);
    if (hal->rcv_bitlen == hal->bitlen - 1) {
        hal->rcv_bitlen++;
    }
    if (!hal->use_dma && hal->rx_buffer) {
        //Copy result out
        spi_ll_read_buffer(hal->hw, hal->rx_buffer, hal->bitlen);
    }
}

uint32_t spi_slave_hal_get_rcv_bitlen(spi_slave_hal_context_t *hal)
{
    return hal->rcv_bitlen;
}

#if CONFIG_IDF_TARGET_ESP32
//This workaround is only for esp32
bool spi_slave_hal_dma_need_reset(const spi_slave_hal_context_t *hal)
{
    bool ret;
    ret = false;
    if (hal->use_dma && hal->rx_buffer) {
        int i;
        //In case CS goes high too soon, the transfer is aborted while the DMA channel still thinks it's going. This
        //leads to issues later on, so in that case we need to reset the channel. The state can be detected because
        //the DMA system doesn't give back the offending descriptor; the owner is still set to DMA.
        for (i = 0; hal->dmadesc_rx[i].dw0.suc_eof == 0 && hal->dmadesc_rx[i].dw0.owner == 0; i++) {}
        if (hal->dmadesc_rx[i].dw0.owner) {
            ret = true;
        }
    }
    return ret;
}
#endif  //#if CONFIG_IDF_TARGET_ESP32