spi_slave_hd.c 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586
  1. /*
  2. * SPDX-FileCopyrightText: 2010-2021 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
#include <string.h>
#include "esp_log.h"
#include "esp_memory_utils.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "freertos/ringbuf.h"
#include "driver/gpio.h"
#include "esp_private/spi_common_internal.h"
#include "driver/spi_slave_hd.h"
#include "hal/spi_slave_hd_hal.h"
//Only SPI2 (and SPI3, on chips that have a third SPI peripheral) can run in slave HD mode
#if (SOC_SPI_PERIPH_NUM == 2)
#define VALID_HOST(x) ((x) == SPI2_HOST)
#elif (SOC_SPI_PERIPH_NUM == 3)
#define VALID_HOST(x) ((x) >= SPI2_HOST && (x) <= SPI3_HOST)
#endif
//Check `cond`; on failure log `warn` under this driver's TAG and return `ret` from the calling function
#define SPIHD_CHECK(cond,warn,ret) do{if(!(cond)){ESP_LOGE(TAG, warn); return ret;}} while(0)
//Driver state for one SPI slave HD host (one slot per claimed peripheral)
typedef struct {
    bool dma_enabled;                           //Whether DMA is used on this host
    int max_transfer_sz;                        //Max bus transaction size in bytes (0 when DMA is disabled)
    uint32_t flags;                             //Actual bus flags, filled by spicommon_bus_initialize_io()
    portMUX_TYPE int_spinlock;                  //Guards event-interrupt enable/disable between tasks and the ISRs
    intr_handle_t intr;                         //SPI peripheral interrupt handle
    intr_handle_t intr_dma;                     //SPI DMA interrupt handle
    spi_slave_hd_callback_config_t callback;    //User callbacks, copied from the slot config at init
    spi_slave_hd_hal_context_t hal;             //HAL context for this host
    bool append_mode;                           //true: append mode (ESP32-S2 only); false: segment mode
    QueueHandle_t tx_trans_queue;               //Segment mode: pending TX transactions (task -> ISR)
    QueueHandle_t tx_ret_queue;                 //Finished TX transactions (ISR -> task)
    QueueHandle_t rx_trans_queue;               //Segment mode: pending RX transactions (task -> ISR)
    QueueHandle_t rx_ret_queue;                 //Finished RX transactions (ISR -> task)
    QueueHandle_t tx_cnting_sem;                //Append mode: counts free TX descriptor slots
    QueueHandle_t rx_cnting_sem;                //Append mode: counts free RX descriptor slots
    spi_slave_hd_data_t* tx_desc;               //Segment mode: TX transaction currently loaded in the DMA
    spi_slave_hd_data_t* rx_desc;               //Segment mode: RX transaction currently loaded in the DMA
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_handle_t pm_lock;               //Holds APB frequency steady while the driver is in use
#endif
} spi_slave_hd_slot_t;
//One slot pointer per SPI peripheral; NULL means the host is not initialized
static spi_slave_hd_slot_t *spihost[SOC_SPI_PERIPH_NUM];
static const char TAG[] = "slave_hd";

//ISR used in segment mode (default)
static void spi_slave_hd_intr_segment(void *arg);
#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
static void spi_slave_hd_intr_append(void *arg);
#endif
  51. esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *bus_config,
  52. const spi_slave_hd_slot_config_t *config)
  53. {
  54. bool spi_chan_claimed;
  55. bool append_mode = (config->flags & SPI_SLAVE_HD_APPEND_MODE);
  56. uint32_t actual_tx_dma_chan = 0;
  57. uint32_t actual_rx_dma_chan = 0;
  58. esp_err_t ret = ESP_OK;
  59. SPIHD_CHECK(VALID_HOST(host_id), "invalid host", ESP_ERR_INVALID_ARG);
  60. #if CONFIG_IDF_TARGET_ESP32S2
  61. SPIHD_CHECK(config->dma_chan == SPI_DMA_DISABLED || config->dma_chan == (int)host_id || config->dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG);
  62. #elif SOC_GDMA_SUPPORTED
  63. SPIHD_CHECK(config->dma_chan == SPI_DMA_DISABLED || config->dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel, chip only support spi dma channel auto-alloc", ESP_ERR_INVALID_ARG);
  64. #endif
  65. #if !CONFIG_IDF_TARGET_ESP32S2
  66. //Append mode is only supported on ESP32S2 now
  67. SPIHD_CHECK(append_mode == 0, "Append mode is only supported on ESP32S2 now", ESP_ERR_INVALID_ARG);
  68. #endif
  69. spi_chan_claimed = spicommon_periph_claim(host_id, "slave_hd");
  70. SPIHD_CHECK(spi_chan_claimed, "host already in use", ESP_ERR_INVALID_STATE);
  71. spi_slave_hd_slot_t* host = calloc(1, sizeof(spi_slave_hd_slot_t));
  72. if (host == NULL) {
  73. ret = ESP_ERR_NO_MEM;
  74. goto cleanup;
  75. }
  76. spihost[host_id] = host;
  77. host->int_spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
  78. host->dma_enabled = (config->dma_chan != SPI_DMA_DISABLED);
  79. if (host->dma_enabled) {
  80. ret = spicommon_dma_chan_alloc(host_id, config->dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan);
  81. if (ret != ESP_OK) {
  82. goto cleanup;
  83. }
  84. }
  85. ret = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, &host->flags);
  86. if (ret != ESP_OK) {
  87. goto cleanup;
  88. }
  89. gpio_set_direction(config->spics_io_num, GPIO_MODE_INPUT);
  90. spicommon_cs_initialize(host_id, config->spics_io_num, 0,
  91. !(bus_config->flags & SPICOMMON_BUSFLAG_NATIVE_PINS));
  92. host->append_mode = append_mode;
  93. spi_slave_hd_hal_config_t hal_config = {
  94. .host_id = host_id,
  95. .dma_in = SPI_LL_GET_HW(host_id),
  96. .dma_out = SPI_LL_GET_HW(host_id),
  97. .dma_enabled = host->dma_enabled,
  98. .tx_dma_chan = actual_tx_dma_chan,
  99. .rx_dma_chan = actual_rx_dma_chan,
  100. .append_mode = append_mode,
  101. .mode = config->mode,
  102. .tx_lsbfirst = (config->flags & SPI_SLAVE_HD_RXBIT_LSBFIRST),
  103. .rx_lsbfirst = (config->flags & SPI_SLAVE_HD_TXBIT_LSBFIRST),
  104. };
  105. if (host->dma_enabled) {
  106. //Malloc for all the DMA descriptors
  107. uint32_t total_desc_size = spi_slave_hd_hal_get_total_desc_size(&host->hal, bus_config->max_transfer_sz);
  108. host->hal.dmadesc_tx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
  109. host->hal.dmadesc_rx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
  110. if (!host->hal.dmadesc_tx || !host->hal.dmadesc_rx) {
  111. ret = ESP_ERR_NO_MEM;
  112. goto cleanup;
  113. }
  114. //Get the actual SPI bus transaction size in bytes.
  115. host->max_transfer_sz = spi_salve_hd_hal_get_max_bus_size(&host->hal);
  116. } else {
  117. //We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
  118. host->max_transfer_sz = 0;
  119. }
  120. //Init the hal according to the hal_config set above
  121. spi_slave_hd_hal_init(&host->hal, &hal_config);
  122. #ifdef CONFIG_PM_ENABLE
  123. ret = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "spi_slave", &host->pm_lock);
  124. if (ret != ESP_OK) {
  125. goto cleanup;
  126. }
  127. // Lock APB frequency while SPI slave driver is in use
  128. esp_pm_lock_acquire(host->pm_lock);
  129. #endif //CONFIG_PM_ENABLE
  130. //Create Queues and Semaphores
  131. host->tx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
  132. host->rx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
  133. if (!host->append_mode) {
  134. host->tx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
  135. host->rx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
  136. if (!host->tx_trans_queue || !host->rx_trans_queue) {
  137. ret = ESP_ERR_NO_MEM;
  138. goto cleanup;
  139. }
  140. }
  141. #if CONFIG_IDF_TARGET_ESP32S2
  142. //Append mode is only supported on ESP32S2 now
  143. else {
  144. host->tx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
  145. host->rx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
  146. if (!host->tx_cnting_sem || !host->rx_cnting_sem) {
  147. ret = ESP_ERR_NO_MEM;
  148. goto cleanup;
  149. }
  150. }
  151. #endif //#if CONFIG_IDF_TARGET_ESP32S2
  152. //Alloc intr
  153. if (!host->append_mode) {
  154. ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_segment,
  155. (void *)host, &host->intr);
  156. if (ret != ESP_OK) {
  157. goto cleanup;
  158. }
  159. ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_segment,
  160. (void *)host, &host->intr_dma);
  161. if (ret != ESP_OK) {
  162. goto cleanup;
  163. }
  164. }
  165. #if CONFIG_IDF_TARGET_ESP32S2
  166. //Append mode is only supported on ESP32S2 now
  167. else {
  168. ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_append,
  169. (void *)host, &host->intr);
  170. if (ret != ESP_OK) {
  171. goto cleanup;
  172. }
  173. ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_append,
  174. (void *)host, &host->intr_dma);
  175. if (ret != ESP_OK) {
  176. goto cleanup;
  177. }
  178. }
  179. #endif //#if CONFIG_IDF_TARGET_ESP32S2
  180. //Init callbacks
  181. memcpy((uint8_t*)&host->callback, (uint8_t*)&config->cb_config, sizeof(spi_slave_hd_callback_config_t));
  182. spi_event_t event = 0;
  183. if (host->callback.cb_buffer_tx!=NULL) event |= SPI_EV_BUF_TX;
  184. if (host->callback.cb_buffer_rx!=NULL) event |= SPI_EV_BUF_RX;
  185. if (host->callback.cb_cmd9!=NULL) event |= SPI_EV_CMD9;
  186. if (host->callback.cb_cmdA!=NULL) event |= SPI_EV_CMDA;
  187. spi_slave_hd_hal_enable_event_intr(&host->hal, event);
  188. return ESP_OK;
  189. cleanup:
  190. // Memory free is in the deinit function
  191. spi_slave_hd_deinit(host_id);
  192. return ret;
  193. }
  194. esp_err_t spi_slave_hd_deinit(spi_host_device_t host_id)
  195. {
  196. spi_slave_hd_slot_t *host = spihost[host_id];
  197. if (host == NULL) return ESP_ERR_INVALID_ARG;
  198. if (host->tx_trans_queue) vQueueDelete(host->tx_trans_queue);
  199. if (host->tx_ret_queue) vQueueDelete(host->tx_ret_queue);
  200. if (host->rx_trans_queue) vQueueDelete(host->rx_trans_queue);
  201. if (host->rx_ret_queue) vQueueDelete(host->rx_ret_queue);
  202. if (host->tx_cnting_sem) vSemaphoreDelete(host->tx_cnting_sem);
  203. if (host->rx_cnting_sem) vSemaphoreDelete(host->rx_cnting_sem);
  204. if (host) {
  205. free(host->hal.dmadesc_tx);
  206. free(host->hal.dmadesc_rx);
  207. esp_intr_free(host->intr);
  208. esp_intr_free(host->intr_dma);
  209. #ifdef CONFIG_PM_ENABLE
  210. if (host->pm_lock) {
  211. esp_pm_lock_release(host->pm_lock);
  212. esp_pm_lock_delete(host->pm_lock);
  213. }
  214. #endif
  215. }
  216. spicommon_periph_free(host_id);
  217. if (host->dma_enabled) {
  218. spicommon_dma_chan_free(host_id);
  219. }
  220. free(host);
  221. spihost[host_id] = NULL;
  222. return ESP_OK;
  223. }
//Re-arm the SEND event interrupt (under the spinlock, so it can't race with the
//ISR disabling it) so the ISR runs and loads the next queued TX transaction.
static void tx_invoke(spi_slave_hd_slot_t* host)
{
    portENTER_CRITICAL(&host->int_spinlock);
    spi_slave_hd_hal_invoke_event_intr(&host->hal, SPI_EV_SEND);
    portEXIT_CRITICAL(&host->int_spinlock);
}
//Re-arm the RECV event interrupt (under the spinlock, so it can't race with the
//ISR disabling it) so the ISR runs and loads the next queued RX transaction.
static void rx_invoke(spi_slave_hd_slot_t* host)
{
    portENTER_CRITICAL(&host->int_spinlock);
    spi_slave_hd_hal_invoke_event_intr(&host->hal, SPI_EV_RECV);
    portEXIT_CRITICAL(&host->int_spinlock);
}
//If event `ev` is pending, clear it and (when a callback is registered) invoke `cb`.
//Returns pdTRUE if the callback woke a higher-priority task.
//Note the operand order: check_clear_event runs first so the event is cleared
//even when `cb` is NULL — do not swap the && operands.
static inline IRAM_ATTR BaseType_t intr_check_clear_callback(spi_slave_hd_slot_t* host, spi_event_t ev, slave_cb_t cb)
{
    BaseType_t cb_awoken = pdFALSE;
    if (spi_slave_hd_hal_check_clear_event(&host->hal, ev) && cb) {
        spi_slave_hd_event_t event = {.event = ev};
        cb(host->callback.arg, &event, &cb_awoken);
    }
    return cb_awoken;
}
//ISR for segment mode. Per invocation it:
// 1. forwards shared-buffer and CMD9/CMDA events to the user callbacks,
// 2. completes the in-flight TX/RX transaction (callback + return queue),
// 3. loads the next queued transaction (if any) into the DMA,
// 4. re-enables the SEND/RECV interrupts only for directions that got new work.
static IRAM_ATTR void spi_slave_hd_intr_segment(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t*)arg;
    spi_slave_hd_callback_config_t *callback = &host->callback;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    BaseType_t awoken = pdFALSE;
    BaseType_t ret;

    //Step 1: buffer/command events
    awoken |= intr_check_clear_callback(host, SPI_EV_BUF_TX, callback->cb_buffer_tx);
    awoken |= intr_check_clear_callback(host, SPI_EV_BUF_RX, callback->cb_buffer_rx);
    awoken |= intr_check_clear_callback(host, SPI_EV_CMD9, callback->cb_cmd9);
    awoken |= intr_check_clear_callback(host, SPI_EV_CMDA, callback->cb_cmdA);

    bool tx_done = false;
    bool rx_done = false;
    //Check-and-disable must happen under the spinlock so it can't race with
    //tx_invoke()/rx_invoke() re-arming the interrupts from a task.
    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (host->tx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_SEND)) {
        tx_done = true;
    }
    if (host->rx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_RECV)) {
        rx_done = true;
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    //Step 2: hand finished transactions back to the application
    if (tx_done) {
        bool ret_queue = true;
        if (callback->cb_sent) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_SEND,
                .trans = host->tx_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            //Callback returning false means it consumed the descriptor: skip the return queue
            ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }
        if (ret_queue) {
            ret = xQueueSendFromISR(host->tx_ret_queue, &host->tx_desc, &awoken);
            // The return queue is full. All the data remian in send_queue + ret_queue should not be more than the queue length.
            assert(ret == pdTRUE);
        }
        host->tx_desc = NULL;
    }
    if (rx_done) {
        bool ret_queue = true;
        //Record how many bytes the master actually transferred in this segment
        host->rx_desc->trans_len = spi_slave_hd_hal_rxdma_seg_get_len(hal);
        if (callback->cb_recv) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_RECV,
                .trans = host->rx_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }
        if (ret_queue) {
            ret = xQueueSendFromISR(host->rx_ret_queue, &host->rx_desc, &awoken);
            // The return queue is full. All the data remian in send_queue + ret_queue should not be more than the queue length.
            assert(ret == pdTRUE);
        }
        host->rx_desc = NULL;
    }

    //Step 3: load the next queued transactions into the DMA, if any are waiting
    bool tx_sent = false;
    bool rx_sent = false;
    if (!host->tx_desc) {
        ret = xQueueReceiveFromISR(host->tx_trans_queue, &host->tx_desc, &awoken);
        if (ret == pdTRUE) {
            spi_slave_hd_hal_txdma(hal, host->tx_desc->data, host->tx_desc->len);
            tx_sent = true;
            if (callback->cb_send_dma_ready) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_SEND_DMA_READY,
                    .trans = host->tx_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                callback->cb_send_dma_ready(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
        }
    }
    if (!host->rx_desc) {
        ret = xQueueReceiveFromISR(host->rx_trans_queue, &host->rx_desc, &awoken);
        if (ret == pdTRUE) {
            spi_slave_hd_hal_rxdma(hal, host->rx_desc->data, host->rx_desc->len);
            rx_sent = true;
            if (callback->cb_recv_dma_ready) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_RECV_DMA_READY,
                    .trans = host->rx_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                callback->cb_recv_dma_ready(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
        }
    }

    //Step 4: re-enable interrupts only for directions that were (re)loaded
    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (tx_sent) {
        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_SEND);
    }
    if (rx_sent) {
        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_RECV);
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (awoken==pdTRUE) portYIELD_FROM_ISR();
}
#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
//ISR for append mode: drains every finished TX/RX descriptor from the HAL,
//invokes the user callback, pushes the descriptor onto the return queue and
//gives back one counting-semaphore slot per finished transaction.
static IRAM_ATTR void spi_slave_hd_intr_append(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t*)arg;
    spi_slave_hd_callback_config_t *callback = &host->callback;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    BaseType_t awoken = pdFALSE;
    BaseType_t ret __attribute__((unused));   //only read inside assert()

    bool tx_done = false;
    bool rx_done = false;
    //Clear the event flags under the spinlock so we can't lose an event that
    //fires while we're checking.
    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_SEND)) {
        tx_done = true;
    }
    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_RECV)) {
        rx_done = true;
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (tx_done) {
        spi_slave_hd_data_t *trans_desc;
        //Multiple descriptors may have finished since the last ISR: drain them all
        while (1) {
            bool trans_finish = false;
            trans_finish = spi_slave_hd_hal_get_tx_finished_trans(hal, (void **)&trans_desc);
            if (!trans_finish) {
                break;
            }
            bool ret_queue = true;
            if (callback->cb_sent) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_SEND,
                    .trans = trans_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                //Callback returning false means it consumed the descriptor: skip the return queue
                ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
            if (ret_queue) {
                ret = xQueueSendFromISR(host->tx_ret_queue, &trans_desc, &awoken);
                assert(ret == pdTRUE);
                //Free one TX slot for spi_slave_hd_append_trans()
                ret = xSemaphoreGiveFromISR(host->tx_cnting_sem, &awoken);
                assert(ret == pdTRUE);
            }
        }
    }
    if (rx_done) {
        spi_slave_hd_data_t *trans_desc;
        size_t trans_len;
        while (1) {
            bool trans_finish = false;
            trans_finish = spi_slave_hd_hal_get_rx_finished_trans(hal, (void **)&trans_desc, &trans_len);
            if (!trans_finish) {
                break;
            }
            //Record how many bytes the master actually wrote
            trans_desc->trans_len = trans_len;
            bool ret_queue = true;
            if (callback->cb_recv) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_RECV,
                    .trans = trans_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
            if (ret_queue) {
                ret = xQueueSendFromISR(host->rx_ret_queue, &trans_desc, &awoken);
                assert(ret == pdTRUE);
                //Free one RX slot for spi_slave_hd_append_trans()
                ret = xSemaphoreGiveFromISR(host->rx_cnting_sem, &awoken);
                assert(ret == pdTRUE);
            }
        }
    }

    if (awoken==pdTRUE) portYIELD_FROM_ISR();
}
#endif //#if CONFIG_IDF_TARGET_ESP32S2
  423. static esp_err_t get_ret_queue_result(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
  424. {
  425. spi_slave_hd_slot_t *host = spihost[host_id];
  426. spi_slave_hd_data_t *trans;
  427. BaseType_t ret;
  428. if (chan == SPI_SLAVE_CHAN_TX) {
  429. ret = xQueueReceive(host->tx_ret_queue, &trans, timeout);
  430. } else {
  431. ret = xQueueReceive(host->rx_ret_queue, &trans, timeout);
  432. }
  433. if (ret == pdFALSE) {
  434. return ESP_ERR_TIMEOUT;
  435. }
  436. *out_trans = trans;
  437. return ESP_OK;
  438. }
  439. //---------------------------------------------------------Segment Mode Transaction APIs-----------------------------------------------------------//
  440. esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t* trans, TickType_t timeout)
  441. {
  442. spi_slave_hd_slot_t* host = spihost[host_id];
  443. SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
  444. SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
  445. SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
  446. SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
  447. if (chan == SPI_SLAVE_CHAN_TX) {
  448. BaseType_t ret = xQueueSend(host->tx_trans_queue, &trans, timeout);
  449. if (ret == pdFALSE) {
  450. return ESP_ERR_TIMEOUT;
  451. }
  452. tx_invoke(host);
  453. } else { //chan == SPI_SLAVE_CHAN_RX
  454. BaseType_t ret = xQueueSend(host->rx_trans_queue, &trans, timeout);
  455. if (ret == pdFALSE) {
  456. return ESP_ERR_TIMEOUT;
  457. }
  458. rx_invoke(host);
  459. }
  460. return ESP_OK;
  461. }
  462. esp_err_t spi_slave_hd_get_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t** out_trans, TickType_t timeout)
  463. {
  464. esp_err_t ret;
  465. spi_slave_hd_slot_t* host = spihost[host_id];
  466. SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
  467. SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
  468. ret = get_ret_queue_result(host_id, chan, out_trans, timeout);
  469. return ret;
  470. }
//Copy `len` bytes out of the slave's shared registers starting at `addr` into `out_data`.
//Thin forwarder to the HAL; no bounds checking is done here.
void spi_slave_hd_read_buffer(spi_host_device_t host_id, int addr, uint8_t *out_data, size_t len)
{
    spi_slave_hd_hal_read_buffer(&spihost[host_id]->hal, addr, out_data, len);
}
//Copy `len` bytes from `data` into the slave's shared registers starting at `addr`.
//Thin forwarder to the HAL; no bounds checking is done here.
void spi_slave_hd_write_buffer(spi_host_device_t host_id, int addr, uint8_t *data, size_t len)
{
    spi_slave_hd_hal_write_buffer(&spihost[host_id]->hal, addr, data, len);
}
  479. #if CONFIG_IDF_TARGET_ESP32S2
  480. //Append mode is only supported on ESP32S2 now
  481. //---------------------------------------------------------Append Mode Transaction APIs-----------------------------------------------------------//
//Append-mode only (ESP32-S2): append a transaction descriptor to the running
//DMA chain. Blocks up to `timeout` ticks for a free descriptor slot.
esp_err_t spi_slave_hd_append_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, TickType_t timeout)
{
    esp_err_t err;
    spi_slave_hd_slot_t *host = spihost[host_id];
    spi_slave_hd_hal_context_t *hal = &host->hal;

    SPIHD_CHECK(trans->len <= SPI_MAX_DMA_LEN, "Currently we only support transaction with data length within 4092 bytes", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);

    if (chan == SPI_SLAVE_CHAN_TX) {
        //Take one free descriptor slot; the ISR gives it back when a TX descriptor finishes
        BaseType_t ret = xSemaphoreTake(host->tx_cnting_sem, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        err = spi_slave_hd_hal_txdma_append(hal, trans->data, trans->len, trans);
    } else {
        //Same, for the RX direction
        BaseType_t ret = xSemaphoreTake(host->rx_cnting_sem, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        err = spi_slave_hd_hal_rxdma_append(hal, trans->data, trans->len, trans);
    }
    if (err != ESP_OK) {
        //NOTE(review): the semaphore slot taken above is not returned on this
        //failure path, so repeated failures permanently shrink the usable queue
        //depth — confirm whether the HAL guarantees this cannot happen.
        ESP_LOGE(TAG, "Wait until the DMA finishes its transaction");
    }
    return err;
}
  510. esp_err_t spi_slave_hd_get_append_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
  511. {
  512. esp_err_t ret;
  513. spi_slave_hd_slot_t* host = spihost[host_id];
  514. SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
  515. SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
  516. ret = get_ret_queue_result(host_id, chan, out_trans, timeout);
  517. return ret;
  518. }
  519. #endif //#if CONFIG_IDF_TARGET_ESP32S2