spi_slave_hd.c

// Copyright 2010-2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "esp_log.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "freertos/ringbuf.h"
#include "driver/gpio.h"
#include "driver/spi_common_internal.h"
#include "driver/spi_slave_hd.h"
#include "hal/spi_slave_hd_hal.h"

#if (SOC_SPI_PERIPH_NUM == 2)
#define VALID_HOST(x) ((x) == SPI2_HOST)
#elif (SOC_SPI_PERIPH_NUM == 3)
#define VALID_HOST(x) ((x) >= SPI2_HOST && (x) <= SPI3_HOST)
#endif
#define SPIHD_CHECK(cond,warn,ret) do{if(!(cond)){ESP_LOGE(TAG, warn); return ret;}} while(0)

typedef struct {
    bool dma_enabled;
    int max_transfer_sz;
    uint32_t flags;
    portMUX_TYPE int_spinlock;
    intr_handle_t intr;
    intr_handle_t intr_dma;
    spi_slave_hd_callback_config_t callback;
    spi_slave_hd_hal_context_t hal;
    bool append_mode;

    QueueHandle_t tx_trans_queue;
    QueueHandle_t tx_ret_queue;
    QueueHandle_t rx_trans_queue;
    QueueHandle_t rx_ret_queue;
    QueueHandle_t tx_cnting_sem;
    QueueHandle_t rx_cnting_sem;

    spi_slave_hd_data_t *tx_desc;
    spi_slave_hd_data_t *rx_desc;
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_handle_t pm_lock;
#endif
} spi_slave_hd_slot_t;

static spi_slave_hd_slot_t *spihost[SOC_SPI_PERIPH_NUM];
static const char TAG[] = "slave_hd";

static void spi_slave_hd_intr_segment(void *arg);
#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
static void spi_slave_hd_intr_append(void *arg);
#endif
esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *bus_config,
                            const spi_slave_hd_slot_config_t *config)
{
    bool spi_chan_claimed;
    bool append_mode = (config->flags & SPI_SLAVE_HD_APPEND_MODE);
    uint32_t actual_tx_dma_chan = 0;
    uint32_t actual_rx_dma_chan = 0;
    esp_err_t ret = ESP_OK;

    SPIHD_CHECK(VALID_HOST(host_id), "invalid host", ESP_ERR_INVALID_ARG);
#if CONFIG_IDF_TARGET_ESP32S2
    SPIHD_CHECK(config->dma_chan == SPI_DMA_DISABLED || config->dma_chan == (int)host_id || config->dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG);
#elif SOC_GDMA_SUPPORTED
    SPIHD_CHECK(config->dma_chan == SPI_DMA_DISABLED || config->dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel, chip only support spi dma channel auto-alloc", ESP_ERR_INVALID_ARG);
#endif
#if !CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    SPIHD_CHECK(append_mode == 0, "Append mode is only supported on ESP32S2 now", ESP_ERR_INVALID_ARG);
#endif

    spi_chan_claimed = spicommon_periph_claim(host_id, "slave_hd");
    SPIHD_CHECK(spi_chan_claimed, "host already in use", ESP_ERR_INVALID_STATE);

    spi_slave_hd_slot_t *host = calloc(1, sizeof(spi_slave_hd_slot_t));
    if (host == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    spihost[host_id] = host;
    host->int_spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
    host->dma_enabled = (config->dma_chan != SPI_DMA_DISABLED);

    if (host->dma_enabled) {
        ret = spicommon_slave_dma_chan_alloc(host_id, config->dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }

    ret = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, &host->flags);
    if (ret != ESP_OK) {
        goto cleanup;
    }
    gpio_set_direction(config->spics_io_num, GPIO_MODE_INPUT);
    spicommon_cs_initialize(host_id, config->spics_io_num, 0,
            !(bus_config->flags & SPICOMMON_BUSFLAG_NATIVE_PINS));
    host->append_mode = append_mode;

    spi_slave_hd_hal_config_t hal_config = {
        .host_id = host_id,
        .dma_in = SPI_LL_GET_HW(host_id),
        .dma_out = SPI_LL_GET_HW(host_id),
        .dma_enabled = host->dma_enabled,
        .tx_dma_chan = actual_tx_dma_chan,
        .rx_dma_chan = actual_rx_dma_chan,
        .append_mode = append_mode,
        .mode = config->mode,
        .tx_lsbfirst = (config->flags & SPI_SLAVE_HD_RXBIT_LSBFIRST),
        .rx_lsbfirst = (config->flags & SPI_SLAVE_HD_TXBIT_LSBFIRST),
    };

    if (host->dma_enabled) {
        //Malloc for all the DMA descriptors
        uint32_t total_desc_size = spi_slave_hd_hal_get_total_desc_size(&host->hal, bus_config->max_transfer_sz);
        host->hal.dmadesc_tx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
        host->hal.dmadesc_rx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
        if (!host->hal.dmadesc_tx || !host->hal.dmadesc_rx) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }

        //Get the actual SPI bus transaction size in bytes.
        host->max_transfer_sz = spi_salve_hd_hal_get_max_bus_size(&host->hal);
    } else {
        //We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
        host->max_transfer_sz = 0;
    }

    //Init the hal according to the hal_config set above
    spi_slave_hd_hal_init(&host->hal, &hal_config);

#ifdef CONFIG_PM_ENABLE
    ret = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "spi_slave", &host->pm_lock);
    if (ret != ESP_OK) {
        goto cleanup;
    }
    // Lock APB frequency while SPI slave driver is in use
    esp_pm_lock_acquire(host->pm_lock);
#endif //CONFIG_PM_ENABLE

    //Create Queues and Semaphores
    host->tx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
    host->rx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
    if (!host->append_mode) {
        host->tx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
        host->rx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
        if (!host->tx_trans_queue || !host->rx_trans_queue) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
    }
#if CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    else {
        host->tx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
        host->rx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
        if (!host->tx_cnting_sem || !host->rx_cnting_sem) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
    }
#endif //#if CONFIG_IDF_TARGET_ESP32S2

    //Alloc intr
    if (!host->append_mode) {
        ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_segment,
                             (void *)host, &host->intr);
        if (ret != ESP_OK) {
            goto cleanup;
        }
        ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_segment,
                             (void *)host, &host->intr_dma);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }
#if CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    else {
        ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_append,
                             (void *)host, &host->intr);
        if (ret != ESP_OK) {
            goto cleanup;
        }
        ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_append,
                             (void *)host, &host->intr_dma);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }
#endif //#if CONFIG_IDF_TARGET_ESP32S2

    //Init callbacks
    memcpy((uint8_t *)&host->callback, (uint8_t *)&config->cb_config, sizeof(spi_slave_hd_callback_config_t));
    spi_event_t event = 0;
    if (host->callback.cb_buffer_tx != NULL) event |= SPI_EV_BUF_TX;
    if (host->callback.cb_buffer_rx != NULL) event |= SPI_EV_BUF_RX;
    if (host->callback.cb_cmd9 != NULL) event |= SPI_EV_CMD9;
    if (host->callback.cb_cmdA != NULL) event |= SPI_EV_CMDA;
    spi_slave_hd_hal_enable_event_intr(&host->hal, event);

    return ESP_OK;

cleanup:
    // Memory free is in the deinit function
    spi_slave_hd_deinit(host_id);
    return ret;
}
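/*
 * Usage sketch (not part of the driver): how an application might bring the
 * half-duplex slave up on SPI2 in segment mode. The GPIO numbers, queue size
 * and transfer size below are placeholders for illustration, not values
 * required by this driver.
 *
 *     spi_bus_config_t bus_cfg = {
 *         .mosi_io_num = GPIO_NUM_11,
 *         .miso_io_num = GPIO_NUM_13,
 *         .sclk_io_num = GPIO_NUM_12,
 *         .quadwp_io_num = -1,
 *         .quadhd_io_num = -1,
 *         .max_transfer_sz = 4096,
 *     };
 *     spi_slave_hd_slot_config_t slot_cfg = {
 *         .mode = 0,
 *         .spics_io_num = GPIO_NUM_10,
 *         .flags = 0,
 *         .command_bits = 8,
 *         .address_bits = 8,
 *         .dummy_bits = 8,
 *         .queue_size = 4,
 *         .dma_chan = SPI_DMA_CH_AUTO,
 *     };
 *     ESP_ERROR_CHECK(spi_slave_hd_init(SPI2_HOST, &bus_cfg, &slot_cfg));
 */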
esp_err_t spi_slave_hd_deinit(spi_host_device_t host_id)
{
    spi_slave_hd_slot_t *host = spihost[host_id];
    if (host == NULL) return ESP_ERR_INVALID_ARG;

    if (host->tx_trans_queue) vQueueDelete(host->tx_trans_queue);
    if (host->tx_ret_queue) vQueueDelete(host->tx_ret_queue);
    if (host->rx_trans_queue) vQueueDelete(host->rx_trans_queue);
    if (host->rx_ret_queue) vQueueDelete(host->rx_ret_queue);
    if (host->tx_cnting_sem) vSemaphoreDelete(host->tx_cnting_sem);
    if (host->rx_cnting_sem) vSemaphoreDelete(host->rx_cnting_sem);
    if (host) {
        free(host->hal.dmadesc_tx);
        free(host->hal.dmadesc_rx);
        esp_intr_free(host->intr);
        esp_intr_free(host->intr_dma);
#ifdef CONFIG_PM_ENABLE
        if (host->pm_lock) {
            esp_pm_lock_release(host->pm_lock);
            esp_pm_lock_delete(host->pm_lock);
        }
#endif
    }

    spicommon_periph_free(host_id);
    if (host->dma_enabled) {
        spicommon_slave_free_dma(host_id);
    }
    free(host);
    spihost[host_id] = NULL;
    return ESP_OK;
}

static void tx_invoke(spi_slave_hd_slot_t *host)
{
    portENTER_CRITICAL(&host->int_spinlock);
    spi_slave_hd_hal_invoke_event_intr(&host->hal, SPI_EV_SEND);
    portEXIT_CRITICAL(&host->int_spinlock);
}

static void rx_invoke(spi_slave_hd_slot_t *host)
{
    portENTER_CRITICAL(&host->int_spinlock);
    spi_slave_hd_hal_invoke_event_intr(&host->hal, SPI_EV_RECV);
    portEXIT_CRITICAL(&host->int_spinlock);
}
static inline IRAM_ATTR BaseType_t intr_check_clear_callback(spi_slave_hd_slot_t *host, spi_event_t ev, slave_cb_t cb)
{
    BaseType_t cb_awoken = pdFALSE;
    if (spi_slave_hd_hal_check_clear_event(&host->hal, ev) && cb) {
        spi_slave_hd_event_t event = {.event = ev};
        cb(host->callback.arg, &event, &cb_awoken);
    }
    return cb_awoken;
}
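/*
 * Callback sketch (illustration only, not part of the driver): every callback in
 * spi_slave_hd_callback_config_t runs in ISR context through the paths above and
 * below, so it must be ISR-safe and report any woken task through the awoken
 * argument. Returning true keeps the default behavior of pushing the descriptor
 * to the return queue; returning false tells the driver the callback already
 * consumed it. The notified task handle is an assumed application variable.
 *
 *     static IRAM_ATTR bool example_cb_recv(void *arg, spi_slave_hd_event_t *event, BaseType_t *awoken)
 *     {
 *         TaskHandle_t consumer = (TaskHandle_t)arg;      // set via cb_config.arg
 *         vTaskNotifyGiveFromISR(consumer, awoken);       // wake the consumer task
 *         return true;                                    // still forward event->trans to the ret queue
 *     }
 */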
static IRAM_ATTR void spi_slave_hd_intr_segment(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg;
    spi_slave_hd_callback_config_t *callback = &host->callback;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    BaseType_t awoken = pdFALSE;
    BaseType_t ret;

    awoken |= intr_check_clear_callback(host, SPI_EV_BUF_TX, callback->cb_buffer_tx);
    awoken |= intr_check_clear_callback(host, SPI_EV_BUF_RX, callback->cb_buffer_rx);
    awoken |= intr_check_clear_callback(host, SPI_EV_CMD9, callback->cb_cmd9);
    awoken |= intr_check_clear_callback(host, SPI_EV_CMDA, callback->cb_cmdA);

    bool tx_done = false;
    bool rx_done = false;
    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (host->tx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_SEND)) {
        tx_done = true;
    }
    if (host->rx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_RECV)) {
        rx_done = true;
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (tx_done) {
        bool ret_queue = true;
        if (callback->cb_sent) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_SEND,
                .trans = host->tx_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }
        if (ret_queue) {
            ret = xQueueSendFromISR(host->tx_ret_queue, &host->tx_desc, &awoken);
            // The return queue should never be full here: the descriptors in the trans queue plus the ret queue never exceed the queue length.
            assert(ret == pdTRUE);
        }
        host->tx_desc = NULL;
    }
    if (rx_done) {
        bool ret_queue = true;
        host->rx_desc->trans_len = spi_slave_hd_hal_rxdma_seg_get_len(hal);
        if (callback->cb_recv) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_RECV,
                .trans = host->rx_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }
        if (ret_queue) {
            ret = xQueueSendFromISR(host->rx_ret_queue, &host->rx_desc, &awoken);
            // The return queue should never be full here: the descriptors in the trans queue plus the ret queue never exceed the queue length.
            assert(ret == pdTRUE);
        }
        host->rx_desc = NULL;
    }

    bool tx_sent = false;
    bool rx_sent = false;
    if (!host->tx_desc) {
        ret = xQueueReceiveFromISR(host->tx_trans_queue, &host->tx_desc, &awoken);
        if (ret == pdTRUE) {
            spi_slave_hd_hal_txdma(hal, host->tx_desc->data, host->tx_desc->len);
            tx_sent = true;
            if (callback->cb_send_dma_ready) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_SEND_DMA_READY,
                    .trans = host->tx_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                callback->cb_send_dma_ready(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
        }
    }
    if (!host->rx_desc) {
        ret = xQueueReceiveFromISR(host->rx_trans_queue, &host->rx_desc, &awoken);
        if (ret == pdTRUE) {
            spi_slave_hd_hal_rxdma(hal, host->rx_desc->data, host->rx_desc->len);
            rx_sent = true;
            if (callback->cb_recv_dma_ready) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_RECV_DMA_READY,
                    .trans = host->rx_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                callback->cb_recv_dma_ready(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
        }
    }

    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (tx_sent) {
        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_SEND);
    }
    if (rx_sent) {
        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_RECV);
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (awoken == pdTRUE) portYIELD_FROM_ISR();
}
#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
static IRAM_ATTR void spi_slave_hd_intr_append(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg;
    spi_slave_hd_callback_config_t *callback = &host->callback;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    BaseType_t awoken = pdFALSE;
    BaseType_t ret;
    bool tx_done = false;
    bool rx_done = false;

    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_SEND)) {
        tx_done = true;
    }
    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_RECV)) {
        rx_done = true;
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (tx_done) {
        spi_slave_hd_data_t *trans_desc;
        while (1) {
            bool trans_finish = false;
            trans_finish = spi_slave_hd_hal_get_tx_finished_trans(hal, (void **)&trans_desc);
            if (!trans_finish) {
                break;
            }

            bool ret_queue = true;
            if (callback->cb_sent) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_SEND,
                    .trans = trans_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }

            if (ret_queue) {
                ret = xQueueSendFromISR(host->tx_ret_queue, &trans_desc, &awoken);
                assert(ret == pdTRUE);

                ret = xSemaphoreGiveFromISR(host->tx_cnting_sem, &awoken);
                assert(ret == pdTRUE);
            }
        }
    }
    if (rx_done) {
        spi_slave_hd_data_t *trans_desc;
        size_t trans_len;
        while (1) {
            bool trans_finish = false;
            trans_finish = spi_slave_hd_hal_get_rx_finished_trans(hal, (void **)&trans_desc, &trans_len);
            if (!trans_finish) {
                break;
            }
            trans_desc->trans_len = trans_len;

            bool ret_queue = true;
            if (callback->cb_recv) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_RECV,
                    .trans = trans_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }

            if (ret_queue) {
                ret = xQueueSendFromISR(host->rx_ret_queue, &trans_desc, &awoken);
                assert(ret == pdTRUE);

                ret = xSemaphoreGiveFromISR(host->rx_cnting_sem, &awoken);
                assert(ret == pdTRUE);
            }
        }
    }

    if (awoken == pdTRUE) portYIELD_FROM_ISR();
}
#endif //#if CONFIG_IDF_TARGET_ESP32S2
static esp_err_t get_ret_queue_result(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    spi_slave_hd_slot_t *host = spihost[host_id];
    spi_slave_hd_data_t *trans;
    BaseType_t ret;

    if (chan == SPI_SLAVE_CHAN_TX) {
        ret = xQueueReceive(host->tx_ret_queue, &trans, timeout);
    } else {
        ret = xQueueReceive(host->rx_ret_queue, &trans, timeout);
    }
    if (ret == pdFALSE) {
        return ESP_ERR_TIMEOUT;
    }

    *out_trans = trans;
    return ESP_OK;
}
//---------------------------------------------------------Segment Mode Transaction APIs-----------------------------------------------------------//
esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, TickType_t timeout)
{
    spi_slave_hd_slot_t *host = spihost[host_id];

    SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);

    if (chan == SPI_SLAVE_CHAN_TX) {
        BaseType_t ret = xQueueSend(host->tx_trans_queue, &trans, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        tx_invoke(host);
    } else { //chan == SPI_SLAVE_CHAN_RX
        BaseType_t ret = xQueueSend(host->rx_trans_queue, &trans, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        rx_invoke(host);
    }
    return ESP_OK;
}

esp_err_t spi_slave_hd_get_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    esp_err_t ret;
    spi_slave_hd_slot_t *host = spihost[host_id];

    SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
    ret = get_ret_queue_result(host_id, chan, out_trans, timeout);

    return ret;
}
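/*
 * Usage sketch for segment mode (illustration only): queue one RX descriptor and
 * block until the master has sent a segment. The buffer must live in DMA-capable
 * memory, as checked in spi_slave_hd_queue_trans() above; the 128-byte size is an
 * arbitrary example.
 *
 *     uint8_t *recv_buf = heap_caps_calloc(1, 128, MALLOC_CAP_DMA);
 *     spi_slave_hd_data_t trans = {
 *         .data = recv_buf,
 *         .len = 128,
 *     };
 *     spi_slave_hd_data_t *ret_trans = NULL;
 *     ESP_ERROR_CHECK(spi_slave_hd_queue_trans(SPI2_HOST, SPI_SLAVE_CHAN_RX, &trans, portMAX_DELAY));
 *     ESP_ERROR_CHECK(spi_slave_hd_get_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY));
 *     // ret_trans points back at &trans; ret_trans->trans_len is the byte count actually received
 */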
void spi_slave_hd_read_buffer(spi_host_device_t host_id, int addr, uint8_t *out_data, size_t len)
{
    spi_slave_hd_hal_read_buffer(&spihost[host_id]->hal, addr, out_data, len);
}

void spi_slave_hd_write_buffer(spi_host_device_t host_id, int addr, uint8_t *data, size_t len)
{
    spi_slave_hd_hal_write_buffer(&spihost[host_id]->hal, addr, data, len);
}
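/*
 * Usage sketch (illustration only): the shared buffer is a small register window the
 * master can access with the buffer read/write commands, independent of the DMA
 * channels. Here the slave publishes a 4-byte status word at offset 0; the offset and
 * value are arbitrary examples.
 *
 *     uint32_t status = 0x01020304;
 *     spi_slave_hd_write_buffer(SPI2_HOST, 0, (uint8_t *)&status, sizeof(status));
 */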
#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
//---------------------------------------------------------Append Mode Transaction APIs-----------------------------------------------------------//
esp_err_t spi_slave_hd_append_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, TickType_t timeout)
{
    esp_err_t err;
    spi_slave_hd_slot_t *host = spihost[host_id];
    spi_slave_hd_hal_context_t *hal = &host->hal;

    SPIHD_CHECK(trans->len <= SPI_MAX_DMA_LEN, "Currently we only support transaction with data length within 4092 bytes", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);

    if (chan == SPI_SLAVE_CHAN_TX) {
        BaseType_t ret = xSemaphoreTake(host->tx_cnting_sem, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        err = spi_slave_hd_hal_txdma_append(hal, trans->data, trans->len, trans);
    } else {
        BaseType_t ret = xSemaphoreTake(host->rx_cnting_sem, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        err = spi_slave_hd_hal_rxdma_append(hal, trans->data, trans->len, trans);
    }
    if (err != ESP_OK) {
        ESP_LOGE(TAG, "Wait until the DMA finishes its transaction");
    }

    return err;
}
esp_err_t spi_slave_hd_get_append_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    esp_err_t ret;
    spi_slave_hd_slot_t *host = spihost[host_id];

    SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
    ret = get_ret_queue_result(host_id, chan, out_trans, timeout);

    return ret;
}
#endif //#if CONFIG_IDF_TARGET_ESP32S2
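/*
 * Usage sketch for append mode (ESP32-S2 only, illustration): descriptors are loaded
 * onto the DMA chain ahead of time with spi_slave_hd_append_trans() and reclaimed with
 * spi_slave_hd_get_append_trans_res() once the master has clocked them out. Buffer size
 * and placement below are arbitrary examples.
 *
 *     static DMA_ATTR uint8_t tx_buf[256];
 *     static spi_slave_hd_data_t tx_trans = { .data = tx_buf, .len = sizeof(tx_buf) };
 *     spi_slave_hd_data_t *done = NULL;
 *     ESP_ERROR_CHECK(spi_slave_hd_append_trans(SPI2_HOST, SPI_SLAVE_CHAN_TX, &tx_trans, portMAX_DELAY));
 *     ESP_ERROR_CHECK(spi_slave_hd_get_append_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_TX, &done, portMAX_DELAY));
 */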