spi_slave_hd_hal.c

// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// The HAL layer for SPI Slave HD

#include <string.h>
#include "esp_types.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "sdkconfig.h"
#include "soc/spi_periph.h"
#include "soc/lldesc.h"
#include "soc/soc_caps.h"
#include "hal/spi_slave_hd_hal.h"
#include "hal/assert.h"

//This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
#if SOC_GDMA_SUPPORTED
#include "soc/gdma_struct.h"
#include "hal/gdma_ll.h"

#define spi_dma_ll_rx_reset(dev, chan)                          gdma_ll_rx_reset_channel(&GDMA, chan)
#define spi_dma_ll_tx_reset(dev, chan)                          gdma_ll_tx_reset_channel(&GDMA, chan)
#define spi_dma_ll_rx_enable_burst_data(dev, chan, enable)      gdma_ll_rx_enable_data_burst(&GDMA, chan, enable)
#define spi_dma_ll_tx_enable_burst_data(dev, chan, enable)      gdma_ll_tx_enable_data_burst(&GDMA, chan, enable)
#define spi_dma_ll_rx_enable_burst_desc(dev, chan, enable)      gdma_ll_rx_enable_descriptor_burst(&GDMA, chan, enable)
#define spi_dma_ll_tx_enable_burst_desc(dev, chan, enable)      gdma_ll_tx_enable_descriptor_burst(&GDMA, chan, enable)
#define spi_dma_ll_enable_out_auto_wrback(dev, chan, enable)    gdma_ll_tx_enable_auto_write_back(&GDMA, chan, enable)
#define spi_dma_ll_set_out_eof_generation(dev, chan, enable)    gdma_ll_tx_set_eof_mode(&GDMA, chan, enable)
#define spi_dma_ll_get_out_eof_desc_addr(dev, chan)             gdma_ll_tx_get_eof_desc_addr(&GDMA, chan)
#define spi_dma_ll_get_in_suc_eof_desc_addr(dev, chan)          gdma_ll_rx_get_success_eof_desc_addr(&GDMA, chan)
#define spi_dma_ll_rx_start(dev, chan, addr) do {\
            gdma_ll_rx_set_desc_addr(&GDMA, chan, (uint32_t)addr);\
            gdma_ll_rx_start(&GDMA, chan);\
        } while (0)
#define spi_dma_ll_tx_start(dev, chan, addr) do {\
            gdma_ll_tx_set_desc_addr(&GDMA, chan, (uint32_t)addr);\
            gdma_ll_tx_start(&GDMA, chan);\
        } while (0)
#endif

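//Turn on data and descriptor burst access for both DMA channels, plus TX auto write-back and EOF generation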
static void s_spi_slave_hd_hal_dma_init_config(const spi_slave_hd_hal_context_t *hal)
{
    spi_dma_ll_rx_enable_burst_data(hal->dma_in, hal->rx_dma_chan, 1);
    spi_dma_ll_tx_enable_burst_data(hal->dma_out, hal->tx_dma_chan, 1);
    spi_dma_ll_rx_enable_burst_desc(hal->dma_in, hal->rx_dma_chan, 1);
    spi_dma_ll_tx_enable_burst_desc(hal->dma_out, hal->tx_dma_chan, 1);
    spi_dma_ll_enable_out_auto_wrback(hal->dma_out, hal->tx_dma_chan, 1);
    spi_dma_ll_set_out_eof_generation(hal->dma_out, hal->tx_dma_chan, 1);
}

void spi_slave_hd_hal_init(spi_slave_hd_hal_context_t *hal, const spi_slave_hd_hal_config_t *hal_config)
{
    spi_dev_t *hw = SPI_LL_GET_HW(hal_config->host_id);
    hal->dev = hw;
    hal->dma_in = hal_config->dma_in;
    hal->dma_out = hal_config->dma_out;
    hal->dma_enabled = hal_config->dma_enabled;
    hal->tx_dma_chan = hal_config->tx_dma_chan;
    hal->rx_dma_chan = hal_config->rx_dma_chan;
    hal->append_mode = hal_config->append_mode;
    hal->rx_cur_desc = hal->dmadesc_rx;
    hal->tx_cur_desc = hal->dmadesc_tx;
    STAILQ_NEXT(&hal->tx_dummy_head.desc, qe) = &hal->dmadesc_tx->desc;
    hal->tx_dma_head = &hal->tx_dummy_head;
    STAILQ_NEXT(&hal->rx_dummy_head.desc, qe) = &hal->dmadesc_rx->desc;
    hal->rx_dma_head = &hal->rx_dummy_head;

    //Configure slave
    s_spi_slave_hd_hal_dma_init_config(hal);

    spi_ll_slave_hd_init(hw);
    spi_ll_set_addr_bitlen(hw, hal_config->address_bits);
    spi_ll_set_command_bitlen(hw, hal_config->command_bits);
    spi_ll_set_dummy(hw, hal_config->dummy_bits);
    spi_ll_set_rx_lsbfirst(hw, hal_config->rx_lsbfirst);
    spi_ll_set_tx_lsbfirst(hw, hal_config->tx_lsbfirst);
    spi_ll_slave_set_mode(hw, hal_config->mode, (hal_config->dma_enabled));

    spi_ll_disable_intr(hw, UINT32_MAX);
    spi_ll_clear_intr(hw, UINT32_MAX);

    if (!hal_config->append_mode) {
        spi_ll_set_intr(hw, SPI_LL_INTR_CMD7 | SPI_LL_INTR_CMD8);

        bool workaround_required = false;
        if (!spi_ll_get_intr(hw, SPI_LL_INTR_CMD7)) {
            hal->intr_not_triggered |= SPI_EV_RECV;
            workaround_required = true;
        }
        if (!spi_ll_get_intr(hw, SPI_LL_INTR_CMD8)) {
            hal->intr_not_triggered |= SPI_EV_SEND;
            workaround_required = true;
        }

        if (workaround_required) {
            //Workaround if the previous interrupts are not writable
            spi_ll_set_intr(hw, SPI_LL_INTR_TRANS_DONE);
        }
    }
#if CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    else {
        spi_ll_enable_intr(hw, SPI_LL_INTR_OUT_EOF | SPI_LL_INTR_CMD7);
    }
#endif

    spi_ll_slave_hd_set_len_cond(hw, SPI_LL_TRANS_LEN_COND_WRBUF |
                                     SPI_LL_TRANS_LEN_COND_WRDMA |
                                     SPI_LL_TRANS_LEN_COND_RDBUF |
                                     SPI_LL_TRANS_LEN_COND_RDDMA);
    spi_ll_slave_set_seg_mode(hal->dev, true);
}

uint32_t spi_salve_hd_hal_get_max_bus_size(spi_slave_hd_hal_context_t *hal)
{
    return hal->dma_desc_num * LLDESC_MAX_NUM_PER_DESC;
}

uint32_t spi_slave_hd_hal_get_total_desc_size(spi_slave_hd_hal_context_t *hal, uint32_t bus_size)
{
    //See how many dma descriptors we need
    int dma_desc_ct = (bus_size + LLDESC_MAX_NUM_PER_DESC - 1) / LLDESC_MAX_NUM_PER_DESC;
    if (dma_desc_ct == 0) {
        dma_desc_ct = 1; //default to 4k when max is not given
    }
    hal->dma_desc_num = dma_desc_ct;

    return hal->dma_desc_num * sizeof(spi_slave_hd_hal_desc_append_t);
}

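//Segment (non-append) mode: load the descriptor chain and restart the DMA for the next single transaction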
void spi_slave_hd_hal_rxdma(spi_slave_hd_hal_context_t *hal, uint8_t *out_buf, size_t len)
{
    lldesc_setup_link(&hal->dmadesc_rx->desc, out_buf, len, true);

    spi_ll_dma_rx_fifo_reset(hal->dev);
    spi_dma_ll_rx_reset(hal->dma_in, hal->rx_dma_chan);
    spi_ll_slave_reset(hal->dev);
    spi_ll_infifo_full_clr(hal->dev);
    spi_ll_clear_intr(hal->dev, SPI_LL_INTR_CMD7);

    spi_ll_dma_rx_enable(hal->dev, 1);
    spi_dma_ll_rx_start(hal->dma_in, hal->rx_dma_chan, &hal->dmadesc_rx->desc);
}

void spi_slave_hd_hal_txdma(spi_slave_hd_hal_context_t *hal, uint8_t *data, size_t len)
{
    lldesc_setup_link(&hal->dmadesc_tx->desc, data, len, false);

    spi_ll_dma_tx_fifo_reset(hal->dev);
    spi_dma_ll_tx_reset(hal->dma_out, hal->tx_dma_chan);
    spi_ll_slave_reset(hal->dev);
    spi_ll_outfifo_empty_clr(hal->dev);
    spi_ll_clear_intr(hal->dev, SPI_LL_INTR_CMD8);

    spi_ll_dma_tx_enable(hal->dev, 1);
    spi_dma_ll_tx_start(hal->dma_out, hal->tx_dma_chan, &hal->dmadesc_tx->desc);
}

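//Translate an spi_event_t mask into the corresponding spi_ll_intr_t interrupt bits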
static spi_ll_intr_t get_event_intr(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
{
    spi_ll_intr_t intr = 0;
#if CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    if ((ev & SPI_EV_SEND) && hal->append_mode) intr |= SPI_LL_INTR_OUT_EOF;
#endif
    if ((ev & SPI_EV_SEND) && !hal->append_mode) intr |= SPI_LL_INTR_CMD8;
    if (ev & SPI_EV_RECV) intr |= SPI_LL_INTR_CMD7;
    if (ev & SPI_EV_BUF_TX) intr |= SPI_LL_INTR_RDBUF;
    if (ev & SPI_EV_BUF_RX) intr |= SPI_LL_INTR_WRBUF;
    if (ev & SPI_EV_CMD9) intr |= SPI_LL_INTR_CMD9;
    if (ev & SPI_EV_CMDA) intr |= SPI_LL_INTR_CMDA;
    if (ev & SPI_EV_TRANS) intr |= SPI_LL_INTR_TRANS_DONE;
    return intr;
}

bool spi_slave_hd_hal_check_clear_event(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
{
    spi_ll_intr_t intr = get_event_intr(hal, ev);
    if (spi_ll_get_intr(hal->dev, intr)) {
        spi_ll_clear_intr(hal->dev, intr);
        return true;
    }
    return false;
}

bool spi_slave_hd_hal_check_disable_event(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
{
    //The trans_done interrupt is used for the workaround when some interrupt is not writable
    spi_ll_intr_t intr = get_event_intr(hal, ev);

    // Workaround for these interrupts not writable
    uint32_t missing_intr = hal->intr_not_triggered & ev;
    if (missing_intr) {
        if ((missing_intr & SPI_EV_RECV) && spi_ll_get_intr(hal->dev, SPI_LL_INTR_CMD7)) {
            hal->intr_not_triggered &= ~SPI_EV_RECV;
        }
        if ((missing_intr & SPI_EV_SEND) && spi_ll_get_intr(hal->dev, SPI_LL_INTR_CMD8)) {
            hal->intr_not_triggered &= ~SPI_EV_SEND;
        }
        if (spi_ll_get_intr(hal->dev, SPI_LL_INTR_TRANS_DONE)) {
            spi_ll_disable_intr(hal->dev, SPI_LL_INTR_TRANS_DONE);
        }
    }

    if (spi_ll_get_intr(hal->dev, intr)) {
        spi_ll_disable_intr(hal->dev, intr);
        return true;
    }
    return false;
}

void spi_slave_hd_hal_enable_event_intr(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
{
    spi_ll_intr_t intr = get_event_intr(hal, ev);
    spi_ll_enable_intr(hal->dev, intr);
}

void spi_slave_hd_hal_invoke_event_intr(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
{
    spi_ll_intr_t intr = get_event_intr(hal, ev);

    // Workaround for these interrupts not writable
    if (hal->intr_not_triggered & ev & (SPI_EV_RECV | SPI_EV_SEND)) {
        intr |= SPI_LL_INTR_TRANS_DONE;
    }

    spi_ll_enable_intr(hal->dev, intr);
}

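//The shared buffer registers are accessed by the CPU directly, no DMA involved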
void spi_slave_hd_hal_read_buffer(spi_slave_hd_hal_context_t *hal, int addr, uint8_t *out_data, size_t len)
{
    spi_ll_read_buffer_byte(hal->dev, addr, out_data, len);
}

void spi_slave_hd_hal_write_buffer(spi_slave_hd_hal_context_t *hal, int addr, uint8_t *data, size_t len)
{
    spi_ll_write_buffer_byte(hal->dev, addr, data, len);
}

int spi_slave_hd_hal_get_last_addr(spi_slave_hd_hal_context_t *hal)
{
    return spi_ll_slave_hd_get_last_addr(hal->dev);
}

int spi_slave_hd_hal_get_rxlen(spi_slave_hd_hal_context_t *hal)
{
    //this length is in bytes
    return spi_ll_slave_get_rx_byte_len(hal->dev);
}

int spi_slave_hd_hal_rxdma_seg_get_len(spi_slave_hd_hal_context_t *hal)
{
    lldesc_t *desc = &hal->dmadesc_rx->desc;
    return lldesc_get_received_len(desc, NULL);
}

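//Append mode: advance along the descriptor list towards the DMA EOF descriptor, returning one finished transaction per call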
bool spi_slave_hd_hal_get_tx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans)
{
    if ((uint32_t)&hal->tx_dma_head->desc == spi_dma_ll_get_out_eof_desc_addr(hal->dma_out, hal->tx_dma_chan)) {
        return false;
    }

    hal->tx_dma_head = (spi_slave_hd_hal_desc_append_t *)STAILQ_NEXT(&hal->tx_dma_head->desc, qe);
    *out_trans = hal->tx_dma_head->arg;
    hal->tx_recycled_desc_cnt++;

    return true;
}

bool spi_slave_hd_hal_get_rx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans, size_t *out_len)
{
    if ((uint32_t)&hal->rx_dma_head->desc == spi_dma_ll_get_in_suc_eof_desc_addr(hal->dma_in, hal->rx_dma_chan)) {
        return false;
    }

    hal->rx_dma_head = (spi_slave_hd_hal_desc_append_t *)STAILQ_NEXT(&hal->rx_dma_head->desc, qe);
    *out_trans = hal->rx_dma_head->arg;
    *out_len = hal->rx_dma_head->desc.length;
    hal->rx_recycled_desc_cnt++;

    return true;
}

#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
static void spi_slave_hd_hal_link_append_desc(spi_slave_hd_hal_desc_append_t *dmadesc, const void *data, int len, bool isrx, void *arg)
{
    HAL_ASSERT(len <= LLDESC_MAX_NUM_PER_DESC); //TODO: Add support for transaction with length larger than 4092, IDF-2660
    int n = 0;
    while (len) {
        int dmachunklen = len;
        if (dmachunklen > LLDESC_MAX_NUM_PER_DESC) {
            dmachunklen = LLDESC_MAX_NUM_PER_DESC;
        }
        if (isrx) {
            //Receive needs DMA length rounded to next 32-bit boundary
            dmadesc[n].desc.size = (dmachunklen + 3) & (~3);
            dmadesc[n].desc.length = (dmachunklen + 3) & (~3);
        } else {
            dmadesc[n].desc.size = dmachunklen;
            dmadesc[n].desc.length = dmachunklen;
        }
        dmadesc[n].desc.buf = (uint8_t *)data;
        dmadesc[n].desc.eof = 0;
        dmadesc[n].desc.sosf = 0;
        dmadesc[n].desc.owner = 1;
        dmadesc[n].desc.qe.stqe_next = &dmadesc[n + 1].desc;
        dmadesc[n].arg = arg;
        len -= dmachunklen;
        data += dmachunklen;
        n++;
    }
    dmadesc[n - 1].desc.eof = 1; //Mark last DMA desc as end of stream.
    dmadesc[n - 1].desc.qe.stqe_next = NULL;
}

esp_err_t spi_slave_hd_hal_txdma_append(spi_slave_hd_hal_context_t *hal, uint8_t *data, size_t len, void *arg)
{
    //Check if there are enough available DMA descriptors for software to use
    int num_required = (len + LLDESC_MAX_NUM_PER_DESC - 1) / LLDESC_MAX_NUM_PER_DESC;
    int not_recycled_desc_num = hal->tx_used_desc_cnt - hal->tx_recycled_desc_cnt;
    int available_desc_num = hal->dma_desc_num - not_recycled_desc_num;
    if (num_required > available_desc_num) {
        return ESP_ERR_INVALID_STATE;
    }

    spi_slave_hd_hal_link_append_desc(hal->tx_cur_desc, data, len, false, arg);

    if (!hal->tx_dma_started) {
        hal->tx_dma_started = true;
        //start a link
        hal->tx_dma_tail = hal->tx_cur_desc;
        spi_ll_clear_intr(hal->dev, SPI_LL_INTR_OUT_EOF);
        spi_ll_dma_tx_fifo_reset(hal->dma_out);
        spi_ll_outfifo_empty_clr(hal->dev);
        spi_dma_ll_tx_reset(hal->dma_out, hal->tx_dma_chan);
        spi_ll_dma_tx_enable(hal->dev, 1);
        spi_dma_ll_tx_start(hal->dma_out, hal->tx_dma_chan, &hal->tx_cur_desc->desc);
    } else {
        //there is already a consecutive link
        STAILQ_NEXT(&hal->tx_dma_tail->desc, qe) = &hal->tx_cur_desc->desc;
        hal->tx_dma_tail = hal->tx_cur_desc;
        spi_dma_ll_tx_restart(hal->dma_out, hal->tx_dma_chan);
    }

    //Move the current descriptor pointer according to the number of the linked descriptors
    for (int i = 0; i < num_required; i++) {
        hal->tx_used_desc_cnt++;
        hal->tx_cur_desc++;
        if (hal->tx_cur_desc == hal->dmadesc_tx + hal->dma_desc_num) {
            hal->tx_cur_desc = hal->dmadesc_tx;
        }
    }

    return ESP_OK;
}

esp_err_t spi_slave_hd_hal_rxdma_append(spi_slave_hd_hal_context_t *hal, uint8_t *data, size_t len, void *arg)
{
    //Check if there are enough available dma descriptors for software to use
    int num_required = (len + LLDESC_MAX_NUM_PER_DESC - 1) / LLDESC_MAX_NUM_PER_DESC;
    int not_recycled_desc_num = hal->rx_used_desc_cnt - hal->rx_recycled_desc_cnt;
    int available_desc_num = hal->dma_desc_num - not_recycled_desc_num;
    if (num_required > available_desc_num) {
        return ESP_ERR_INVALID_STATE;
    }

    spi_slave_hd_hal_link_append_desc(hal->rx_cur_desc, data, len, false, arg);

    if (!hal->rx_dma_started) {
        hal->rx_dma_started = true;
        //start a link
        hal->rx_dma_tail = hal->rx_cur_desc;
        spi_ll_clear_intr(hal->dev, SPI_LL_INTR_CMD7);
        spi_dma_ll_rx_reset(hal->dma_in, hal->rx_dma_chan);
        spi_ll_dma_rx_fifo_reset(hal->dma_in);
        spi_ll_infifo_full_clr(hal->dev);
        spi_ll_dma_rx_enable(hal->dev, 1);
        spi_dma_ll_rx_start(hal->dma_in, hal->rx_dma_chan, &hal->rx_cur_desc->desc);
    } else {
        //there is already a consecutive link
        STAILQ_NEXT(&hal->rx_dma_tail->desc, qe) = &hal->rx_cur_desc->desc;
        hal->rx_dma_tail = hal->rx_cur_desc;
        spi_dma_ll_rx_restart(hal->dma_in, hal->rx_dma_chan);
    }

    //Move the current descriptor pointer according to the number of the linked descriptors
    for (int i = 0; i < num_required; i++) {
        hal->rx_used_desc_cnt++;
        hal->rx_cur_desc++;
        if (hal->rx_cur_desc == hal->dmadesc_rx + hal->dma_desc_num) {
            hal->rx_cur_desc = hal->dmadesc_rx;
        }
    }

    return ESP_OK;
}
#endif //#if CONFIG_IDF_TARGET_ESP32S2