spi_slave_hd.c

/*
 * SPDX-FileCopyrightText: 2010-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <string.h>     //for memcpy()
#include "esp_log.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "freertos/ringbuf.h"
#include "driver/gpio.h"
#include "driver/spi_common_internal.h"
#include "driver/spi_slave_hd.h"
#include "hal/spi_slave_hd_hal.h"

#if (SOC_SPI_PERIPH_NUM == 2)
#define VALID_HOST(x) ((x) == SPI2_HOST)
#elif (SOC_SPI_PERIPH_NUM == 3)
#define VALID_HOST(x) ((x) >= SPI2_HOST && (x) <= SPI3_HOST)
#endif

//Check a condition; on failure, log `warn` and return `ret` from the calling function
#define SPIHD_CHECK(cond,warn,ret) do{if(!(cond)){ESP_LOGE(TAG, warn); return ret;}} while(0)
typedef struct {
    bool dma_enabled;
    int max_transfer_sz;
    uint32_t flags;
    portMUX_TYPE int_spinlock;
    intr_handle_t intr;
    intr_handle_t intr_dma;
    spi_slave_hd_callback_config_t callback;
    spi_slave_hd_hal_context_t hal;
    bool append_mode;

    QueueHandle_t tx_trans_queue;   //segment mode: TX transactions waiting to be loaded into the DMA
    QueueHandle_t tx_ret_queue;     //finished TX transactions, to be fetched by the application
    QueueHandle_t rx_trans_queue;   //segment mode: RX transactions waiting to be loaded into the DMA
    QueueHandle_t rx_ret_queue;     //finished RX transactions, to be fetched by the application
    QueueHandle_t tx_cnting_sem;    //append mode: counts free TX descriptor slots
    QueueHandle_t rx_cnting_sem;    //append mode: counts free RX descriptor slots

    spi_slave_hd_data_t *tx_desc;   //segment mode: descriptor currently owned by the TX DMA
    spi_slave_hd_data_t *rx_desc;   //segment mode: descriptor currently owned by the RX DMA
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_handle_t pm_lock;
#endif
} spi_slave_hd_slot_t;
static spi_slave_hd_slot_t *spihost[SOC_SPI_PERIPH_NUM];
static const char TAG[] = "slave_hd";

static void spi_slave_hd_intr_segment(void *arg);
#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
static void spi_slave_hd_intr_append(void *arg);
#endif
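
/*
 * The driver works in one of two modes, selected at init time:
 *  - Segment mode: the application queues one descriptor per transaction
 *    (tx/rx_trans_queue); the ISR loads it into the DMA and returns it via
 *    tx/rx_ret_queue once the master has clocked the segment.
 *  - Append mode (ESP32-S2 only): descriptors are appended directly onto the
 *    HAL's DMA descriptor chain; the counting semaphores bound how many
 *    descriptors may be outstanding at once.
 */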
esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *bus_config,
                            const spi_slave_hd_slot_config_t *config)
{
    bool spi_chan_claimed;
    bool append_mode = (config->flags & SPI_SLAVE_HD_APPEND_MODE);
    uint32_t actual_tx_dma_chan = 0;
    uint32_t actual_rx_dma_chan = 0;
    esp_err_t ret = ESP_OK;

    SPIHD_CHECK(VALID_HOST(host_id), "invalid host", ESP_ERR_INVALID_ARG);
#if CONFIG_IDF_TARGET_ESP32S2
    SPIHD_CHECK(config->dma_chan == SPI_DMA_DISABLED || config->dma_chan == (int)host_id || config->dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG);
#elif SOC_GDMA_SUPPORTED
    SPIHD_CHECK(config->dma_chan == SPI_DMA_DISABLED || config->dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel, chip only support spi dma channel auto-alloc", ESP_ERR_INVALID_ARG);
#endif
#if !CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    SPIHD_CHECK(append_mode == 0, "Append mode is only supported on ESP32S2 now", ESP_ERR_INVALID_ARG);
#endif

    spi_chan_claimed = spicommon_periph_claim(host_id, "slave_hd");
    SPIHD_CHECK(spi_chan_claimed, "host already in use", ESP_ERR_INVALID_STATE);

    spi_slave_hd_slot_t *host = calloc(1, sizeof(spi_slave_hd_slot_t));
    if (host == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    spihost[host_id] = host;
    host->int_spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
    host->dma_enabled = (config->dma_chan != SPI_DMA_DISABLED);

    if (host->dma_enabled) {
        ret = spicommon_slave_dma_chan_alloc(host_id, config->dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }

    ret = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, &host->flags);
    if (ret != ESP_OK) {
        goto cleanup;
    }
    gpio_set_direction(config->spics_io_num, GPIO_MODE_INPUT);
    spicommon_cs_initialize(host_id, config->spics_io_num, 0,
                            !(bus_config->flags & SPICOMMON_BUSFLAG_NATIVE_PINS));
    host->append_mode = append_mode;

    spi_slave_hd_hal_config_t hal_config = {
        .host_id = host_id,
        .dma_in = SPI_LL_GET_HW(host_id),
        .dma_out = SPI_LL_GET_HW(host_id),
        .dma_enabled = host->dma_enabled,
        .tx_dma_chan = actual_tx_dma_chan,
        .rx_dma_chan = actual_rx_dma_chan,
        .append_mode = append_mode,
        .mode = config->mode,
        .tx_lsbfirst = (config->flags & SPI_SLAVE_HD_RXBIT_LSBFIRST),
        .rx_lsbfirst = (config->flags & SPI_SLAVE_HD_TXBIT_LSBFIRST),
    };
    if (host->dma_enabled) {
        //Malloc for all the DMA descriptors
        uint32_t total_desc_size = spi_slave_hd_hal_get_total_desc_size(&host->hal, bus_config->max_transfer_sz);
        host->hal.dmadesc_tx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
        host->hal.dmadesc_rx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
        if (!host->hal.dmadesc_tx || !host->hal.dmadesc_rx) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }

        //Get the actual SPI bus transaction size in bytes.
        //(Note: the "salve" spelling matches the function name as declared in the HAL header.)
        host->max_transfer_sz = spi_salve_hd_hal_get_max_bus_size(&host->hal);
    } else {
        //We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
        host->max_transfer_sz = 0;
    }

    //Init the hal according to the hal_config set above
    spi_slave_hd_hal_init(&host->hal, &hal_config);

#ifdef CONFIG_PM_ENABLE
    ret = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "spi_slave", &host->pm_lock);
    if (ret != ESP_OK) {
        goto cleanup;
    }
    //Lock APB frequency while the SPI slave driver is in use
    esp_pm_lock_acquire(host->pm_lock);
#endif //CONFIG_PM_ENABLE
    //Create Queues and Semaphores
    host->tx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
    host->rx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
    if (!host->tx_ret_queue || !host->rx_ret_queue) {
        ret = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    if (!host->append_mode) {
        host->tx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
        host->rx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
        if (!host->tx_trans_queue || !host->rx_trans_queue) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
    }
#if CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    else {
        host->tx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
        host->rx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
        if (!host->tx_cnting_sem || !host->rx_cnting_sem) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
    }
#endif //#if CONFIG_IDF_TARGET_ESP32S2
    //Alloc intr
    if (!host->append_mode) {
        ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_segment,
                             (void *)host, &host->intr);
        if (ret != ESP_OK) {
            goto cleanup;
        }
        ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_segment,
                             (void *)host, &host->intr_dma);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }
#if CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    else {
        ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_append,
                             (void *)host, &host->intr);
        if (ret != ESP_OK) {
            goto cleanup;
        }
        ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_append,
                             (void *)host, &host->intr_dma);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }
#endif //#if CONFIG_IDF_TARGET_ESP32S2
    //Init callbacks
    memcpy((uint8_t *)&host->callback, (uint8_t *)&config->cb_config, sizeof(spi_slave_hd_callback_config_t));
    spi_event_t event = 0;
    if (host->callback.cb_buffer_tx != NULL) event |= SPI_EV_BUF_TX;
    if (host->callback.cb_buffer_rx != NULL) event |= SPI_EV_BUF_RX;
    if (host->callback.cb_cmd9 != NULL) event |= SPI_EV_CMD9;
    if (host->callback.cb_cmdA != NULL) event |= SPI_EV_CMDA;
    spi_slave_hd_hal_enable_event_intr(&host->hal, event);

    return ESP_OK;

cleanup:
    //Memory is freed in the deinit function
    spi_slave_hd_deinit(host_id);
    return ret;
}
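
/*
 * Illustrative init sequence (a sketch, not part of the driver; the GPIO
 * numbers and queue_size below are arbitrary placeholders):
 *
 *     spi_bus_config_t bus_cfg = {
 *         .mosi_io_num = 11, .miso_io_num = 13, .sclk_io_num = 12,
 *         .quadwp_io_num = -1, .quadhd_io_num = -1,
 *         .max_transfer_sz = 4096,
 *     };
 *     spi_slave_hd_slot_config_t slot_cfg = {
 *         .spics_io_num = 10,
 *         .dma_chan = SPI_DMA_CH_AUTO,
 *         .queue_size = 4,
 *         .mode = 0,
 *     };
 *     ESP_ERROR_CHECK(spi_slave_hd_init(SPI2_HOST, &bus_cfg, &slot_cfg));
 */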
esp_err_t spi_slave_hd_deinit(spi_host_device_t host_id)
{
    spi_slave_hd_slot_t *host = spihost[host_id];
    if (host == NULL) return ESP_ERR_INVALID_ARG;

    if (host->tx_trans_queue) vQueueDelete(host->tx_trans_queue);
    if (host->tx_ret_queue) vQueueDelete(host->tx_ret_queue);
    if (host->rx_trans_queue) vQueueDelete(host->rx_trans_queue);
    if (host->rx_ret_queue) vQueueDelete(host->rx_ret_queue);
    if (host->tx_cnting_sem) vSemaphoreDelete(host->tx_cnting_sem);
    if (host->rx_cnting_sem) vSemaphoreDelete(host->rx_cnting_sem);

    free(host->hal.dmadesc_tx);
    free(host->hal.dmadesc_rx);
    esp_intr_free(host->intr);
    esp_intr_free(host->intr_dma);
#ifdef CONFIG_PM_ENABLE
    if (host->pm_lock) {
        esp_pm_lock_release(host->pm_lock);
        esp_pm_lock_delete(host->pm_lock);
    }
#endif

    spicommon_periph_free(host_id);
    if (host->dma_enabled) {
        spicommon_slave_free_dma(host_id);
    }
    free(host);
    spihost[host_id] = NULL;
    return ESP_OK;
}
static void tx_invoke(spi_slave_hd_slot_t *host)
{
    portENTER_CRITICAL(&host->int_spinlock);
    spi_slave_hd_hal_invoke_event_intr(&host->hal, SPI_EV_SEND);
    portEXIT_CRITICAL(&host->int_spinlock);
}

static void rx_invoke(spi_slave_hd_slot_t *host)
{
    portENTER_CRITICAL(&host->int_spinlock);
    spi_slave_hd_hal_invoke_event_intr(&host->hal, SPI_EV_RECV);
    portEXIT_CRITICAL(&host->int_spinlock);
}
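
//The two helpers above fire the SEND/RECV interrupt in software, so that the
//ISR runs and picks up a freshly queued descriptor even when the hardware has
//no pending event of its own.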
static inline IRAM_ATTR BaseType_t intr_check_clear_callback(spi_slave_hd_slot_t *host, spi_event_t ev, slave_cb_t cb)
{
    BaseType_t cb_awoken = pdFALSE;
    if (spi_slave_hd_hal_check_clear_event(&host->hal, ev) && cb) {
        spi_slave_hd_event_t event = {.event = ev};
        cb(host->callback.arg, &event, &cb_awoken);
    }
    return cb_awoken;
}
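
/*
 * Segment-mode ISR. Per invocation it:
 *  1. dispatches the buffer/command event callbacks;
 *  2. if the loaded TX/RX descriptor has finished, runs the sent/recv
 *     callback and pushes the descriptor onto the corresponding ret_queue;
 *  3. loads the next descriptor (if any) from the trans_queue into the DMA
 *     and re-enables the SEND/RECV interrupt for it.
 */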
static IRAM_ATTR void spi_slave_hd_intr_segment(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg;
    spi_slave_hd_callback_config_t *callback = &host->callback;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    BaseType_t awoken = pdFALSE;
    BaseType_t ret;

    awoken |= intr_check_clear_callback(host, SPI_EV_BUF_TX, callback->cb_buffer_tx);
    awoken |= intr_check_clear_callback(host, SPI_EV_BUF_RX, callback->cb_buffer_rx);
    awoken |= intr_check_clear_callback(host, SPI_EV_CMD9, callback->cb_cmd9);
    awoken |= intr_check_clear_callback(host, SPI_EV_CMDA, callback->cb_cmdA);

    bool tx_done = false;
    bool rx_done = false;
    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (host->tx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_SEND)) {
        tx_done = true;
    }
    if (host->rx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_RECV)) {
        rx_done = true;
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (tx_done) {
        bool ret_queue = true;
        if (callback->cb_sent) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_SEND,
                .trans = host->tx_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }
        if (ret_queue) {
            ret = xQueueSendFromISR(host->tx_ret_queue, &host->tx_desc, &awoken);
            //The return queue can never be full: all the data remaining in trans_queue + ret_queue cannot exceed the queue length.
            assert(ret == pdTRUE);
        }
        host->tx_desc = NULL;
    }
    if (rx_done) {
        bool ret_queue = true;
        host->rx_desc->trans_len = spi_slave_hd_hal_rxdma_seg_get_len(hal);
        if (callback->cb_recv) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_RECV,
                .trans = host->rx_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }
        if (ret_queue) {
            ret = xQueueSendFromISR(host->rx_ret_queue, &host->rx_desc, &awoken);
            //The return queue can never be full: all the data remaining in trans_queue + ret_queue cannot exceed the queue length.
            assert(ret == pdTRUE);
        }
        host->rx_desc = NULL;
    }
    bool tx_sent = false;
    bool rx_sent = false;
    if (!host->tx_desc) {
        ret = xQueueReceiveFromISR(host->tx_trans_queue, &host->tx_desc, &awoken);
        if (ret == pdTRUE) {
            spi_slave_hd_hal_txdma(hal, host->tx_desc->data, host->tx_desc->len);
            tx_sent = true;
            if (callback->cb_send_dma_ready) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_SEND_DMA_READY,
                    .trans = host->tx_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                callback->cb_send_dma_ready(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
        }
    }
    if (!host->rx_desc) {
        ret = xQueueReceiveFromISR(host->rx_trans_queue, &host->rx_desc, &awoken);
        if (ret == pdTRUE) {
            spi_slave_hd_hal_rxdma(hal, host->rx_desc->data, host->rx_desc->len);
            rx_sent = true;
            if (callback->cb_recv_dma_ready) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_RECV_DMA_READY,
                    .trans = host->rx_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                callback->cb_recv_dma_ready(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
        }
    }

    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (tx_sent) {
        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_SEND);
    }
    if (rx_sent) {
        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_RECV);
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (awoken == pdTRUE) portYIELD_FROM_ISR();
}

#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
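/*
 * Append-mode ISR. The HAL walks its DMA descriptor chain and hands back
 * every descriptor that has completed since the last interrupt; each one is
 * pushed onto the ret_queue, and its slot in the counting semaphore is
 * released so the application may append a new transaction.
 */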
static IRAM_ATTR void spi_slave_hd_intr_append(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg;
    spi_slave_hd_callback_config_t *callback = &host->callback;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    BaseType_t awoken = pdFALSE;
    BaseType_t ret __attribute__((unused));

    bool tx_done = false;
    bool rx_done = false;
    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_SEND)) {
        tx_done = true;
    }
    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_RECV)) {
        rx_done = true;
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (tx_done) {
        spi_slave_hd_data_t *trans_desc;
        while (1) {
            bool trans_finish = spi_slave_hd_hal_get_tx_finished_trans(hal, (void **)&trans_desc);
            if (!trans_finish) {
                break;
            }

            bool ret_queue = true;
            if (callback->cb_sent) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_SEND,
                    .trans = trans_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
            if (ret_queue) {
                ret = xQueueSendFromISR(host->tx_ret_queue, &trans_desc, &awoken);
                assert(ret == pdTRUE);
                ret = xSemaphoreGiveFromISR(host->tx_cnting_sem, &awoken);
                assert(ret == pdTRUE);
            }
        }
    }

    if (rx_done) {
        spi_slave_hd_data_t *trans_desc;
        size_t trans_len;
        while (1) {
            bool trans_finish = spi_slave_hd_hal_get_rx_finished_trans(hal, (void **)&trans_desc, &trans_len);
            if (!trans_finish) {
                break;
            }
            trans_desc->trans_len = trans_len;

            bool ret_queue = true;
            if (callback->cb_recv) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_RECV,
                    .trans = trans_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
            if (ret_queue) {
                ret = xQueueSendFromISR(host->rx_ret_queue, &trans_desc, &awoken);
                assert(ret == pdTRUE);
                ret = xSemaphoreGiveFromISR(host->rx_cnting_sem, &awoken);
                assert(ret == pdTRUE);
            }
        }
    }

    if (awoken == pdTRUE) portYIELD_FROM_ISR();
}
#endif //#if CONFIG_IDF_TARGET_ESP32S2
static esp_err_t get_ret_queue_result(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    spi_slave_hd_slot_t *host = spihost[host_id];
    spi_slave_hd_data_t *trans;
    BaseType_t ret;

    if (chan == SPI_SLAVE_CHAN_TX) {
        ret = xQueueReceive(host->tx_ret_queue, &trans, timeout);
    } else {
        ret = xQueueReceive(host->rx_ret_queue, &trans, timeout);
    }
    if (ret == pdFALSE) {
        return ESP_ERR_TIMEOUT;
    }

    *out_trans = trans;
    return ESP_OK;
}
//---------------------------------------------------------Segment Mode Transaction APIs-----------------------------------------------------------//
esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, TickType_t timeout)
{
    spi_slave_hd_slot_t *host = spihost[host_id];

    SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);

    if (chan == SPI_SLAVE_CHAN_TX) {
        BaseType_t ret = xQueueSend(host->tx_trans_queue, &trans, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        tx_invoke(host);
    } else { //chan == SPI_SLAVE_CHAN_RX
        BaseType_t ret = xQueueSend(host->rx_trans_queue, &trans, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        rx_invoke(host);
    }
    return ESP_OK;
}
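
/*
 * Illustrative segment-mode round trip (a sketch; `buf` must live in
 * DMA-capable memory, e.g. from heap_caps_malloc(len, MALLOC_CAP_DMA)):
 *
 *     spi_slave_hd_data_t trans = { .data = buf, .len = 128 };
 *     spi_slave_hd_data_t *ret_trans;
 *     ESP_ERROR_CHECK(spi_slave_hd_queue_trans(SPI2_HOST, SPI_SLAVE_CHAN_RX, &trans, portMAX_DELAY));
 *     ESP_ERROR_CHECK(spi_slave_hd_get_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY));
 *     //ret_trans == &trans; ret_trans->trans_len is the byte count actually received
 */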
esp_err_t spi_slave_hd_get_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    esp_err_t ret;
    spi_slave_hd_slot_t *host = spihost[host_id];

    SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
    ret = get_ret_queue_result(host_id, chan, out_trans, timeout);

    return ret;
}

void spi_slave_hd_read_buffer(spi_host_device_t host_id, int addr, uint8_t *out_data, size_t len)
{
    spi_slave_hd_hal_read_buffer(&spihost[host_id]->hal, addr, out_data, len);
}

void spi_slave_hd_write_buffer(spi_host_device_t host_id, int addr, uint8_t *data, size_t len)
{
    spi_slave_hd_hal_write_buffer(&spihost[host_id]->hal, addr, data, len);
}
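
/*
 * The two buffer APIs above access the peripheral's shared registers through
 * the HAL (no transaction descriptor involved), e.g. to publish a status word
 * that the master can fetch with a buffer-read command:
 *
 *     uint8_t status = 0x01;   //arbitrary example value
 *     spi_slave_hd_write_buffer(SPI2_HOST, 0, &status, sizeof(status));
 */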
#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
//---------------------------------------------------------Append Mode Transaction APIs-----------------------------------------------------------//
esp_err_t spi_slave_hd_append_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, TickType_t timeout)
{
    esp_err_t err;
    spi_slave_hd_slot_t *host = spihost[host_id];
    spi_slave_hd_hal_context_t *hal = &host->hal;

    SPIHD_CHECK(trans->len <= SPI_MAX_DMA_LEN, "Currently only transactions with data length within 4092 bytes are supported", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);

    if (chan == SPI_SLAVE_CHAN_TX) {
        BaseType_t ret = xSemaphoreTake(host->tx_cnting_sem, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        err = spi_slave_hd_hal_txdma_append(hal, trans->data, trans->len, trans);
    } else {
        BaseType_t ret = xSemaphoreTake(host->rx_cnting_sem, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        err = spi_slave_hd_hal_rxdma_append(hal, trans->data, trans->len, trans);
    }
    if (err != ESP_OK) {
        //An error here likely means the HAL had no free DMA descriptor to link the transaction into
        ESP_LOGE(TAG, "Wait until the DMA finishes its transaction");
    }
    return err;
}

esp_err_t spi_slave_hd_get_append_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    esp_err_t ret;
    spi_slave_hd_slot_t *host = spihost[host_id];

    SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
    ret = get_ret_queue_result(host_id, chan, out_trans, timeout);

    return ret;
}
#endif //#if CONFIG_IDF_TARGET_ESP32S2
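
/*
 * Illustrative append-mode round trip (ESP32-S2 only; a sketch under the same
 * DMA-capable-buffer assumption as the segment-mode example):
 *
 *     spi_slave_hd_data_t trans = { .data = buf, .len = 128 };
 *     spi_slave_hd_data_t *ret_trans;
 *     ESP_ERROR_CHECK(spi_slave_hd_append_trans(SPI2_HOST, SPI_SLAVE_CHAN_RX, &trans, portMAX_DELAY));
 *     ESP_ERROR_CHECK(spi_slave_hd_get_append_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY));
 */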