
Merge branch 'feature/new_restart_mode_driver_and_ut_for_spi_slave_hd' into 'master'

spi_slave_halfduplex: append mode driver

See merge request espressif/esp-idf!10845
Michael (XIAO Xufeng) 5 years ago
parent commit 06f22fc4b0

+ 79 - 39
components/driver/include/driver/spi_slave_hd.h

@@ -33,16 +33,16 @@ extern "C"
 
 /// Descriptor of data to send/receive
 typedef struct {
-    uint8_t*    data;       ///< Buffer to send, must be DMA capable
-    size_t      len;        ///< Len of data to send/receive. For receiving the buffer length should be multiples of 4 bytes, otherwise the extra part will be truncated.
-    size_t      trans_len;  ///< Data actually received
-    void*       arg;        ///< Extra argument indiciating this data
+    uint8_t* data;                              ///< Buffer to send, must be DMA capable
+    size_t   len;                               ///< Len of data to send/receive. For receiving the buffer length should be multiples of 4 bytes, otherwise the extra part will be truncated.
+    size_t   trans_len;                         ///< For RX direction, it indicates the data actually received. For TX direction, it is meaningless.
+    void*    arg;                               ///< Extra argument indicating this data
 } spi_slave_hd_data_t;
 
 /// Information of SPI Slave HD event
 typedef struct {
-    spi_event_t event;          ///< Event type
-    spi_slave_hd_data_t* trans; ///< Corresponding transaction for SPI_EV_SEND and SPI_EV_RECV events
+    spi_event_t          event;                 ///< Event type
+    spi_slave_hd_data_t* trans;                 ///< Corresponding transaction for SPI_EV_SEND and SPI_EV_RECV events
 } spi_slave_hd_event_t;
 
 /// Callback for SPI Slave HD
@@ -50,38 +50,39 @@ typedef bool (*slave_cb_t)(void* arg, spi_slave_hd_event_t* event, BaseType_t* a
 
 /// Channel of SPI Slave HD to do data transaction
 typedef enum {
-    SPI_SLAVE_CHAN_TX = 0,  ///< The output channel (RDDMA)
-    SPI_SLAVE_CHAN_RX = 1,  ///< The input channel (WRDMA)
+    SPI_SLAVE_CHAN_TX = 0,                      ///< The output channel (RDDMA)
+    SPI_SLAVE_CHAN_RX = 1,                      ///< The input channel (WRDMA)
 } spi_slave_chan_t;
 
 /// Callback configuration structure for SPI Slave HD
 typedef struct {
-    slave_cb_t cb_recv;         ///< Callback when receive data
-    slave_cb_t cb_sent;         ///< Callback when data sent
-    slave_cb_t cb_buffer_tx;    ///< Callback when master reads from shared buffer
-    slave_cb_t cb_buffer_rx;    ///< Callback when master writes to shared buffer
-    slave_cb_t cb_cmd9;         ///< Callback when CMD9 received
-    slave_cb_t cb_cmdA;         ///< Callback when CMDA received
-    void* arg;                  ///< Argument indicating this SPI Slave HD peripheral instance
+    slave_cb_t cb_buffer_tx;                    ///< Callback when master reads from shared buffer
+    slave_cb_t cb_buffer_rx;                    ///< Callback when master writes to shared buffer
+    slave_cb_t cb_sent;                         ///< Callback when data are sent
+    slave_cb_t cb_recv;                         ///< Callback when data are received
+    slave_cb_t cb_cmd9;                         ///< Callback when CMD9 received
+    slave_cb_t cb_cmdA;                         ///< Callback when CMDA received
+    void* arg;                                  ///< Argument indicating this SPI Slave HD peripheral instance
 } spi_slave_hd_callback_config_t;
 
-/// Configuration structure for the SPI Slave HD driver
-typedef struct {
-    int spics_io_num;               ///< CS GPIO pin for this device
-    uint32_t flags;                 ///< Bitwise OR of SPI_SLAVE_HD_* flags
-#define SPI_SLAVE_HD_TXBIT_LSBFIRST          (1<<0)  ///< Transmit command/address/data LSB first instead of the default MSB first
-#define SPI_SLAVE_HD_RXBIT_LSBFIRST          (1<<1)  ///< Receive data LSB first instead of the default MSB first
-#define SPI_SLAVE_HD_BIT_LSBFIRST            (SPI_SLAVE_HD_TXBIT_LSBFIRST|SPI_SLAVE_HD_RXBIT_LSBFIRST) ///< Transmit and receive LSB first
 
-    uint8_t mode;                   ///< SPI mode (0-3)
-    int     command_bits;           ///< command field bits, multiples of 8 and at least 8.
-    int     address_bits;           ///< address field bits, multiples of 8 and at least 8.
-    int     dummy_bits;             ///< dummy field bits, multiples of 8 and at least 8.
+//flags for ``spi_slave_hd_slot_config_t`` to use
+#define SPI_SLAVE_HD_TXBIT_LSBFIRST     (1<<0)  ///< Transmit command/address/data LSB first instead of the default MSB first
+#define SPI_SLAVE_HD_RXBIT_LSBFIRST     (1<<1)  ///< Receive data LSB first instead of the default MSB first
+#define SPI_SLAVE_HD_BIT_LSBFIRST       (SPI_SLAVE_HD_TXBIT_LSBFIRST|SPI_SLAVE_HD_RXBIT_LSBFIRST) ///< Transmit and receive LSB first
+#define SPI_SLAVE_HD_APPEND_MODE        (1<<2)  ///< Adopt DMA append mode for transactions. In this mode, users can load (append) DMA descriptors without stopping the DMA
 
-    int     queue_size;             ///< Transaction queue size. This sets how many transactions can be 'in the air' (queued using spi_slave_hd_queue_trans but not yet finished using spi_slave_hd_get_trans_result) at the same time
-
-    int dma_chan;                   ///< DMA channel used
-    spi_slave_hd_callback_config_t cb_config; ///< Callback configuration
+/// Configuration structure for the SPI Slave HD driver
+typedef struct {
+    uint8_t  mode;                              ///< SPI mode (0-3)
+    uint32_t spics_io_num;                      ///< CS GPIO pin for this device
+    uint32_t flags;                             ///< Bitwise OR of SPI_SLAVE_HD_* flags
+    uint32_t command_bits;                      ///< command field bits, multiples of 8 and at least 8.
+    uint32_t address_bits;                      ///< address field bits, multiples of 8 and at least 8.
+    uint32_t dummy_bits;                        ///< dummy field bits, multiples of 8 and at least 8.
+    uint32_t queue_size;                        ///< Transaction queue size. This sets how many transactions can be 'in the air' (queued using spi_slave_hd_queue_trans but not yet finished using spi_slave_hd_get_trans_result) at the same time
+    uint32_t dma_chan;                          ///< DMA channel used
+    spi_slave_hd_callback_config_t cb_config;   ///< Callback configuration
 } spi_slave_hd_slot_config_t;
 
 /**
@@ -111,36 +112,39 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
 esp_err_t spi_slave_hd_deinit(spi_host_device_t host_id);
 
 /**
- * @brief Queue data transaction
+ * @brief Queue transactions (segment mode)
  *
  * @param host_id   Host to queue the transaction
- * @param chan      Channel to queue the data, SPI_SLAVE_CHAN_TX or SPI_SLAVE_CHAN_RX
- * @param trans     Descriptor of data to queue
+ * @param chan      SPI_SLAVE_CHAN_TX or SPI_SLAVE_CHAN_RX
+ * @param trans     Transaction descriptors
  * @param timeout   Timeout before the data is queued
  * @return
  *  - ESP_OK: on success
  *  - ESP_ERR_INVALID_ARG: The input argument is invalid. Can be the following reason:
  *      - The buffer given is not DMA capable
  *      - The length of data is invalid (not larger than 0, or exceed the max transfer length)
- *      - The function is invalid
- *  - ESP_ERR_TIMEOUT: Cannot queue the data before timeout. This is quite possible if the master
- *    doesn't read/write the slave on time.
+ *      - The transaction direction is invalid
+ *  - ESP_ERR_TIMEOUT: Cannot queue the data before timeout. Master is still processing previous transaction.
+ *  - ESP_ERR_INVALID_STATE: Function called in invalid state. This API should be called under segment mode.
  */
 esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t* trans, TickType_t timeout);
 
 /**
- * @brief Get the result of a data transaction
+ * @brief Get the result of a data transaction (segment mode)
+ *
+ * @note Each transaction queued successfully by ``spi_slave_hd_queue_trans`` should be matched by one call to this API.
  *
  * @param host_id   Host to queue the transaction
  * @param chan      Channel to get the result, SPI_SLAVE_CHAN_TX or SPI_SLAVE_CHAN_RX
- * @param[out] out_trans Output descriptor of the returned transaction
+ * @param[out] out_trans Pointer to the transaction descriptor (``spi_slave_hd_data_t``) passed to the driver before. Hardware has finished this transaction. Member ``trans_len`` indicates the actual number of bytes of received data; it is meaningless for TX.
  * @param timeout   Timeout before the result is got
  * @return
  *  - ESP_OK: on success
  *  - ESP_ERR_INVALID_ARG: Function is not valid
  *  - ESP_ERR_TIMEOUT: There's no transaction done before timeout
+ *  - ESP_ERR_INVALID_STATE: Function called in invalid state. This API should be called under segment mode.
  */
-esp_err_t spi_slave_hd_get_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t** out_trans, TickType_t timeout);
+esp_err_t spi_slave_hd_get_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout);
 
 /**
  * @brief Read the shared registers
@@ -162,6 +166,42 @@ void spi_slave_hd_read_buffer(spi_host_device_t host_id, int addr, uint8_t *out_
  */
 void spi_slave_hd_write_buffer(spi_host_device_t host_id, int addr, uint8_t *data, size_t len);
 
+/**
+ * @brief Load transactions (append mode)
+ *
+ * @note In this mode, user transaction descriptors will be appended to the DMA and the DMA will keep processing the data without stopping
+ *
+ * @param host_id   Host to load transactions
+ * @param chan      SPI_SLAVE_CHAN_TX or SPI_SLAVE_CHAN_RX
+ * @param trans     Transaction descriptor
+ * @param timeout   Timeout before the transaction is loaded
+ * @return
+ *  - ESP_OK: on success
+ *  - ESP_ERR_INVALID_ARG: The input argument is invalid. Can be the following reason:
+ *      - The buffer given is not DMA capable
+ *      - The length of data is invalid (not larger than 0, or exceed the max transfer length)
+ *      - The transaction direction is invalid
+ *  - ESP_ERR_TIMEOUT: Master is still processing previous transaction. There is no available transaction for slave to load
+ *  - ESP_ERR_INVALID_STATE: Function called in invalid state. This API should be called under append mode.
+ */
+esp_err_t spi_slave_hd_append_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, TickType_t timeout);
+
+/**
+ * @brief Get the result of a data transaction (append mode)
+ *
+ * @note Each transaction loaded successfully by ``spi_slave_hd_append_trans`` should be matched by one call to this API.
+ *
+ * @param host_id   Host to load the transaction
+ * @param chan      SPI_SLAVE_CHAN_TX or SPI_SLAVE_CHAN_RX
+ * @param[out] out_trans Pointer to the transaction descriptor (``spi_slave_hd_data_t``) passed to the driver before. Hardware has finished this transaction. Member ``trans_len`` indicates the actual number of bytes of received data; it is meaningless for TX.
+ * @param timeout   Timeout before the result is got
+ * @return
+ *  - ESP_OK: on success
+ *  - ESP_ERR_INVALID_ARG: Function is not valid
+ *  - ESP_ERR_TIMEOUT: There's no transaction done before timeout
+ *  - ESP_ERR_INVALID_STATE: Function called in invalid state. This API should be called under append mode.
+ */
+esp_err_t spi_slave_hd_get_append_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout);
 
 #ifdef __cplusplus
 }
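
For reference, a minimal usage sketch of the append-mode API declared above (``spi_slave_hd_append_trans`` / ``spi_slave_hd_get_append_trans_res``). The host (SPI2_HOST), GPIO numbers, queue depth and buffer size are placeholder assumptions, not taken from this merge request; error handling is reduced to ESP_ERROR_CHECK/assert.

#include <stdio.h>
#include <assert.h>
#include "esp_err.h"
#include "esp_heap_caps.h"
#include "freertos/FreeRTOS.h"
#include "driver/spi_common.h"
#include "driver/spi_slave_hd.h"

// Placeholder assumptions: host, pins, queue depth and buffer size.
#define EXAMPLE_HOST        SPI2_HOST
#define EXAMPLE_PIN_CS      10
#define EXAMPLE_PIN_SCLK    12
#define EXAMPLE_PIN_MOSI    11
#define EXAMPLE_PIN_MISO    13

static void example_slave_hd_append_rx(void)
{
    spi_bus_config_t bus_cfg = {
        .mosi_io_num = EXAMPLE_PIN_MOSI,
        .miso_io_num = EXAMPLE_PIN_MISO,
        .sclk_io_num = EXAMPLE_PIN_SCLK,
        .quadwp_io_num = -1,
        .quadhd_io_num = -1,
        .max_transfer_sz = 4092 * 4,
    };
    spi_slave_hd_slot_config_t slot_cfg = {
        .mode = 0,
        .spics_io_num = EXAMPLE_PIN_CS,
        .flags = SPI_SLAVE_HD_APPEND_MODE,      // new flag introduced by this change
        .command_bits = 8,
        .address_bits = 8,
        .dummy_bits = 8,
        .queue_size = 4,
        .dma_chan = EXAMPLE_HOST,
    };
    ESP_ERROR_CHECK(spi_slave_hd_init(EXAMPLE_HOST, &bus_cfg, &slot_cfg));

    // RX buffer must be DMA capable and a multiple of 4 bytes long.
    uint8_t *recv_buf = heap_caps_malloc(256, MALLOC_CAP_DMA);
    assert(recv_buf);
    spi_slave_hd_data_t trans = {
        .data = recv_buf,
        .len = 256,
    };

    // Append the descriptor to the running DMA, then wait for the result.
    ESP_ERROR_CHECK(spi_slave_hd_append_trans(EXAMPLE_HOST, SPI_SLAVE_CHAN_RX, &trans, portMAX_DELAY));

    spi_slave_hd_data_t *ret_trans = NULL;
    ESP_ERROR_CHECK(spi_slave_hd_get_append_trans_res(EXAMPLE_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY));
    printf("received %d bytes\n", (int)ret_trans->trans_len);
}

Unlike the segment-mode pair (``spi_slave_hd_queue_trans`` / ``spi_slave_hd_get_trans_res``), the append pair loads descriptors into a running DMA, so new transactions can be appended while earlier ones are still being transferred.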

+ 1 - 1
components/driver/spi_common.c

@@ -220,7 +220,7 @@ void spicommon_connect_spi_and_dma(spi_host_device_t host, int dma_chan)
     spi_dma_connect_tx_channel_to_periph(gdma_chan, periph_id);
     spi_dma_set_rx_channel_priority(gdma_chan, 1);
     spi_dma_set_tx_channel_priority(gdma_chan, 1);
-#endif
+#endif  //#elif SOC_GDMA_SUPPORTED
 }
 
 static bool bus_uses_iomux_pins(spi_host_device_t host, const spi_bus_config_t* bus_config)

+ 269 - 70
components/driver/spi_slave_hd.c

@@ -14,12 +14,13 @@
 
 #include "esp_log.h"
 #include "freertos/FreeRTOS.h"
+#include "freertos/semphr.h"
+#include "freertos/queue.h"
 #include "freertos/ringbuf.h"
 #include "driver/gpio.h"
 #include "driver/spi_common_internal.h"
-#include "hal/spi_slave_hd_hal.h"
-
 #include "driver/spi_slave_hd.h"
+#include "hal/spi_slave_hd_hal.h"
 
 
 //SPI1 can never be used as the slave
@@ -27,27 +28,25 @@
 #define SPIHD_CHECK(cond,warn,ret) do{if(!(cond)){ESP_LOGE(TAG, warn); return ret;}} while(0)
 
 typedef struct {
-    spi_slave_hd_hal_context_t hal;
     int dma_chan;
-
+    int max_transfer_sz;
+    uint32_t flags;
+    portMUX_TYPE int_spinlock;
     intr_handle_t intr;
     intr_handle_t intr_dma;
     spi_slave_hd_callback_config_t callback;
+    spi_slave_hd_hal_context_t hal;
+    bool append_mode;
 
     QueueHandle_t tx_trans_queue;
     QueueHandle_t tx_ret_queue;
     QueueHandle_t rx_trans_queue;
     QueueHandle_t rx_ret_queue;
+    QueueHandle_t tx_cnting_sem;
+    QueueHandle_t rx_cnting_sem;
 
     spi_slave_hd_data_t* tx_desc;
     spi_slave_hd_data_t* rx_desc;
-
-    uint32_t flags;
-
-    int max_transfer_sz;
-
-    portMUX_TYPE int_spinlock;
-
 #ifdef CONFIG_PM_ENABLE
     esp_pm_lock_handle_t pm_lock;
 #endif
@@ -56,17 +55,25 @@ typedef struct {
 static spi_slave_hd_slot_t *spihost[SOC_SPI_PERIPH_NUM];
 static const char TAG[] = "slave_hd";
 
-
-static void spi_slave_hd_intr(void* arg);
+static void spi_slave_hd_intr_segment(void *arg);
+#if CONFIG_IDF_TARGET_ESP32S2
+//Append mode is only supported on ESP32S2 now
+static void spi_slave_hd_intr_append(void *arg);
+#endif
 
 esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *bus_config,
                             const spi_slave_hd_slot_config_t *config)
 {
     bool spi_chan_claimed, dma_chan_claimed;
+    bool append_mode = (config->flags & SPI_SLAVE_HD_APPEND_MODE);
     esp_err_t ret = ESP_OK;
 
     SPIHD_CHECK(VALID_HOST(host_id), "invalid host", ESP_ERR_INVALID_ARG);
     SPIHD_CHECK(config->dma_chan == 0 || config->dma_chan == host_id, "invalid dma channel", ESP_ERR_INVALID_ARG);
+#if !CONFIG_IDF_TARGET_ESP32S2
+//Append mode is only supported on ESP32S2 now
+    SPIHD_CHECK(append_mode == 0, "Append mode is only supported on ESP32S2 now", ESP_ERR_INVALID_ARG);
+#endif
 
     spi_chan_claimed = spicommon_periph_claim(host_id, "slave_hd");
     SPIHD_CHECK(spi_chan_claimed, "host already in use", ESP_ERR_INVALID_STATE);
@@ -100,35 +107,39 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
     gpio_set_direction(config->spics_io_num, GPIO_MODE_INPUT);
     spicommon_cs_initialize(host_id, config->spics_io_num, 0,
             !(bus_config->flags & SPICOMMON_BUSFLAG_NATIVE_PINS));
-    host->dma_chan = config->dma_chan;
+    host->append_mode = append_mode;
 
     spi_slave_hd_hal_config_t hal_config = {
         .host_id = host_id,
         .dma_in = SPI_LL_GET_HW(host_id),
         .dma_out = SPI_LL_GET_HW(host_id),
+        .dma_chan = config->dma_chan,
+        .append_mode = append_mode,
+        .mode = config->mode,
         .tx_lsbfirst = (config->flags & SPI_SLAVE_HD_RXBIT_LSBFIRST),
         .rx_lsbfirst = (config->flags & SPI_SLAVE_HD_TXBIT_LSBFIRST),
-        .dma_chan = config->dma_chan,
-        .mode = config->mode
     };
-    spi_slave_hd_hal_init(&host->hal, &hal_config);
 
     if (config->dma_chan != 0) {
-        //See how many dma descriptors we need and allocate them
-        int dma_desc_ct = (bus_config->max_transfer_sz + SPI_MAX_DMA_LEN - 1) / SPI_MAX_DMA_LEN;
-        if (dma_desc_ct == 0) dma_desc_ct = 1; //default to 4k when max is not given
-        host->max_transfer_sz = dma_desc_ct * SPI_MAX_DMA_LEN;
-        host->hal.dmadesc_tx = heap_caps_malloc(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
-        host->hal.dmadesc_rx = heap_caps_malloc(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
-
-        if (!host->hal.dmadesc_tx || !host->hal.dmadesc_rx ) {
+        //Malloc for all the DMA descriptors
+        uint32_t total_desc_size = spi_slave_hd_hal_get_total_desc_size(&host->hal, bus_config->max_transfer_sz);
+        host->hal.dmadesc_tx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
+        host->hal.dmadesc_rx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
+        if (!host->hal.dmadesc_tx || !host->hal.dmadesc_rx) {
             ret = ESP_ERR_NO_MEM;
             goto cleanup;
         }
+
+        //Get the actual SPI bus transaction size in bytes.
+        host->max_transfer_sz = spi_salve_hd_hal_get_max_bus_size(&host->hal);
     } else {
         //We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
         host->max_transfer_sz = 0;
     }
+
+    //Init the hal according to the hal_config set above
+    spi_slave_hd_hal_init(&host->hal, &hal_config);
+
 #ifdef CONFIG_PM_ENABLE
     ret = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "spi_slave", &host->pm_lock);
     if (ret != ESP_OK) {
@@ -138,30 +149,60 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
     esp_pm_lock_acquire(host->pm_lock);
 #endif //CONFIG_PM_ENABLE
 
-    //Create queues
-    host->tx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
+    //Create Queues and Semaphores
     host->tx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
-    host->rx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
     host->rx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
-    if (!host->tx_trans_queue || !host->tx_ret_queue ||
-        !host->rx_trans_queue || !host->rx_ret_queue) {
-        ret = ESP_ERR_NO_MEM;
-        goto cleanup;
+    if (!host->append_mode) {
+        host->tx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
+        host->rx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
+        if (!host->tx_trans_queue || !host->rx_trans_queue) {
+            ret = ESP_ERR_NO_MEM;
+            goto cleanup;
+        }
+    }
+#if CONFIG_IDF_TARGET_ESP32S2
+//Append mode is only supported on ESP32S2 now
+    else {
+        host->tx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
+        host->rx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
+        if (!host->tx_cnting_sem || !host->rx_cnting_sem) {
+            ret = ESP_ERR_NO_MEM;
+            goto cleanup;
+        }
     }
+#endif  //#if CONFIG_IDF_TARGET_ESP32S2
 
-    ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr,
+    //Alloc intr
+    if (!host->append_mode) {
+        ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_segment,
                          (void *)host, &host->intr);
-    if (ret != ESP_OK) {
-        goto cleanup;
+        if (ret != ESP_OK) {
+            goto cleanup;
+        }
+        ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_segment,
+                            (void *)host, &host->intr_dma);
+        if (ret != ESP_OK) {
+            goto cleanup;
+        }
     }
-
-    ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr,
-                         (void *)host, &host->intr_dma);
-    if (ret != ESP_OK) {
-        goto cleanup;
+#if CONFIG_IDF_TARGET_ESP32S2
+//Append mode is only supported on ESP32S2 now
+    else {
+        ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_append,
+                         (void *)host, &host->intr);
+        if (ret != ESP_OK) {
+            goto cleanup;
+        }
+        ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_append,
+                            (void *)host, &host->intr_dma);
+        if (ret != ESP_OK) {
+            goto cleanup;
+        }
     }
-    memcpy((uint8_t*)&host->callback, (uint8_t*)&config->cb_config, sizeof(spi_slave_hd_callback_config_t));
+#endif  //#if CONFIG_IDF_TARGET_ESP32S2
 
+    //Init callbacks
+    memcpy((uint8_t*)&host->callback, (uint8_t*)&config->cb_config, sizeof(spi_slave_hd_callback_config_t));
     spi_event_t event = 0;
     if (host->callback.cb_buffer_tx!=NULL) event |= SPI_EV_BUF_TX;
     if (host->callback.cb_buffer_rx!=NULL) event |= SPI_EV_BUF_RX;
@@ -186,6 +227,8 @@ esp_err_t spi_slave_hd_deinit(spi_host_device_t host_id)
     if (host->tx_ret_queue) vQueueDelete(host->tx_ret_queue);
     if (host->rx_trans_queue) vQueueDelete(host->rx_trans_queue);
     if (host->rx_ret_queue) vQueueDelete(host->rx_ret_queue);
+    if (host->tx_cnting_sem) vSemaphoreDelete(host->tx_cnting_sem);
+    if (host->rx_cnting_sem) vSemaphoreDelete(host->rx_cnting_sem);
     if (host) {
         free(host->hal.dmadesc_tx);
         free(host->hal.dmadesc_rx);
@@ -232,27 +275,27 @@ static inline IRAM_ATTR BaseType_t intr_check_clear_callback(spi_slave_hd_slot_t
     return cb_awoken;
 }
 
-static IRAM_ATTR void spi_slave_hd_intr(void* arg)
+static IRAM_ATTR void spi_slave_hd_intr_segment(void *arg)
 {
-    spi_slave_hd_slot_t* host = (spi_slave_hd_slot_t*)arg;
-    BaseType_t awoken = pdFALSE;
+    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t*)arg;
     spi_slave_hd_callback_config_t *callback = &host->callback;
-    ESP_EARLY_LOGV("spi_hd", "intr.");
+    spi_slave_hd_hal_context_t *hal = &host->hal;
+    BaseType_t awoken = pdFALSE;
+    BaseType_t ret;
 
     awoken |= intr_check_clear_callback(host, SPI_EV_BUF_TX, callback->cb_buffer_tx);
     awoken |= intr_check_clear_callback(host, SPI_EV_BUF_RX, callback->cb_buffer_rx);
     awoken |= intr_check_clear_callback(host, SPI_EV_CMD9,   callback->cb_cmd9);
     awoken |= intr_check_clear_callback(host, SPI_EV_CMDA,   callback->cb_cmdA);
 
-    BaseType_t ret;
     bool tx_done = false;
     bool rx_done = false;
 
     portENTER_CRITICAL_ISR(&host->int_spinlock);
-    if (host->tx_desc && spi_slave_hd_hal_check_disable_event(&host->hal, SPI_EV_SEND)) {
+    if (host->tx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_SEND)) {
         tx_done = true;
     }
-    if (host->rx_desc && spi_slave_hd_hal_check_disable_event(&host->hal, SPI_EV_RECV)) {
+    if (host->rx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_RECV)) {
         rx_done = true;
     }
     portEXIT_CRITICAL_ISR(&host->int_spinlock);
@@ -277,9 +320,7 @@ static IRAM_ATTR void spi_slave_hd_intr(void* arg)
     }
     if (rx_done) {
         bool ret_queue = true;
-
-        host->rx_desc->trans_len = spi_slave_hd_hal_rxdma_get_len(&host->hal);
-
+        host->rx_desc->trans_len = spi_slave_hd_hal_rxdma_seg_get_len(hal);
         if (callback->cb_recv) {
             spi_slave_hd_event_t ev = {
                 .event = SPI_EV_RECV,
@@ -302,33 +343,147 @@ static IRAM_ATTR void spi_slave_hd_intr(void* arg)
     if (!host->tx_desc) {
         ret = xQueueReceiveFromISR(host->tx_trans_queue, &host->tx_desc, &awoken);
         if (ret == pdTRUE) {
-            spi_slave_hd_hal_txdma(&host->hal, host->tx_desc->data, host->tx_desc->len);
+            spi_slave_hd_hal_txdma(hal, host->tx_desc->data, host->tx_desc->len);
             tx_sent = true;
         }
     }
     if (!host->rx_desc) {
         ret = xQueueReceiveFromISR(host->rx_trans_queue, &host->rx_desc, &awoken);
         if (ret == pdTRUE) {
-            spi_slave_hd_hal_rxdma(&host->hal, host->rx_desc->data, host->rx_desc->len);
+            spi_slave_hd_hal_rxdma(hal, host->rx_desc->data, host->rx_desc->len);
             rx_sent = true;
         }
     }
 
     portENTER_CRITICAL_ISR(&host->int_spinlock);
+    if (tx_sent) {
+        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_SEND);
+    }
     if (rx_sent) {
-        spi_slave_hd_hal_enable_event_intr(&host->hal, SPI_EV_RECV);
+        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_RECV);
     }
-    if (tx_sent) {
-        spi_slave_hd_hal_enable_event_intr(&host->hal, SPI_EV_SEND);
+    portEXIT_CRITICAL_ISR(&host->int_spinlock);
+
+    if (awoken==pdTRUE) portYIELD_FROM_ISR();
+}
+
+#if CONFIG_IDF_TARGET_ESP32S2
+//Append mode is only supported on ESP32S2 now
+static IRAM_ATTR void spi_slave_hd_intr_append(void *arg)
+{
+    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t*)arg;
+    spi_slave_hd_callback_config_t *callback = &host->callback;
+    spi_slave_hd_hal_context_t *hal = &host->hal;
+    BaseType_t awoken = pdFALSE;
+    BaseType_t ret;
+
+    bool tx_done = false;
+    bool rx_done = false;
+    portENTER_CRITICAL_ISR(&host->int_spinlock);
+    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_SEND)) {
+        tx_done = true;
+    }
+    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_RECV)) {
+        rx_done = true;
     }
     portEXIT_CRITICAL_ISR(&host->int_spinlock);
 
+    if (tx_done) {
+        spi_slave_hd_data_t *trans_desc;
+        while (1) {
+            bool trans_finish = false;
+            portENTER_CRITICAL_ISR(&host->int_spinlock);
+            trans_finish = spi_slave_hd_hal_get_tx_finished_trans(hal, (void **)&trans_desc);
+            portEXIT_CRITICAL_ISR(&host->int_spinlock);
+            if (!trans_finish) {
+                break;
+            }
+
+            bool ret_queue = true;
+            if (callback->cb_sent) {
+                spi_slave_hd_event_t ev = {
+                    .event = SPI_EV_SEND,
+                    .trans = trans_desc,
+                };
+                BaseType_t cb_awoken = pdFALSE;
+                ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
+                awoken |= cb_awoken;
+            }
+
+            if (ret_queue) {
+                ret = xQueueSendFromISR(host->tx_ret_queue, &trans_desc, &awoken);
+                assert(ret == pdTRUE);
+
+                ret = xSemaphoreGiveFromISR(host->tx_cnting_sem, &awoken);
+                assert(ret == pdTRUE);
+            }
+        }
+    }
+
+    if (rx_done) {
+        spi_slave_hd_data_t *trans_desc;
+        size_t trans_len;
+        while (1) {
+            bool trans_finish = false;
+            portENTER_CRITICAL_ISR(&host->int_spinlock);
+            trans_finish = spi_slave_hd_hal_get_rx_finished_trans(hal, (void **)&trans_desc, &trans_len);
+            portEXIT_CRITICAL_ISR(&host->int_spinlock);
+            if (!trans_finish) {
+                break;
+            }
+            trans_desc->trans_len = trans_len;
+
+            bool ret_queue = true;
+            if (callback->cb_recv) {
+                spi_slave_hd_event_t ev = {
+                    .event = SPI_EV_RECV,
+                    .trans = trans_desc,
+                };
+                BaseType_t cb_awoken = pdFALSE;
+                ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
+                awoken |= cb_awoken;
+            }
+
+            if (ret_queue) {
+                ret = xQueueSendFromISR(host->rx_ret_queue, &trans_desc, &awoken);
+                assert(ret == pdTRUE);
+
+                ret = xSemaphoreGiveFromISR(host->rx_cnting_sem, &awoken);
+                assert(ret == pdTRUE);
+            }
+        }
+    }
+
     if (awoken==pdTRUE) portYIELD_FROM_ISR();
 }
+#endif //#if CONFIG_IDF_TARGET_ESP32S2
 
+static esp_err_t get_ret_queue_result(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
+{
+    spi_slave_hd_slot_t *host = spihost[host_id];
+    spi_slave_hd_data_t *trans;
+    BaseType_t ret;
+
+    if (chan == SPI_SLAVE_CHAN_TX) {
+        ret = xQueueReceive(host->tx_ret_queue, &trans, timeout);
+    } else {
+        ret = xQueueReceive(host->rx_ret_queue, &trans, timeout);
+    }
+    if (ret == pdFALSE) {
+        return ESP_ERR_TIMEOUT;
+    }
+
+    *out_trans = trans;
+    return ESP_OK;
+}
+
+//---------------------------------------------------------Segment Mode Transaction APIs-----------------------------------------------------------//
 esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t* trans, TickType_t timeout)
 {
     spi_slave_hd_slot_t* host = spihost[host_id];
+
+    SPIHD_CHECK(trans->len <= SPI_MAX_DMA_LEN, "Currently we only support transaction with data length within 4092 bytes", ESP_ERR_INVALID_ARG);
+    SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
     SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
     SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
     SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
@@ -351,22 +506,14 @@ esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t c
 
 esp_err_t spi_slave_hd_get_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t** out_trans, TickType_t timeout)
 {
-    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
-
+    esp_err_t ret;
     spi_slave_hd_slot_t* host = spihost[host_id];
-    BaseType_t ret;
-    spi_slave_hd_data_t *data;
-    if (chan == SPI_SLAVE_CHAN_TX) {
-        ret = xQueueReceive(host->tx_ret_queue, &data, timeout);
-    } else { // chan == SPI_SLAVE_CHAN_RX
-        ret = xQueueReceive(host->rx_ret_queue, &data, timeout);
-    }
 
-    if (ret == pdFALSE) {
-        return ESP_ERR_TIMEOUT;
-    }
-    *out_trans = data;
-    return ESP_OK;
+    SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
+    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
+    ret = get_ret_queue_result(host_id, chan, out_trans, timeout);
+
+    return ret;
 }
 
 void spi_slave_hd_read_buffer(spi_host_device_t host_id, int addr, uint8_t *out_data, size_t len)
@@ -378,3 +525,55 @@ void spi_slave_hd_write_buffer(spi_host_device_t host_id, int addr, uint8_t *dat
 {
     spi_slave_hd_hal_write_buffer(&spihost[host_id]->hal, addr, data, len);
 }
+
+#if CONFIG_IDF_TARGET_ESP32S2
+//Append mode is only supported on ESP32S2 now
+//---------------------------------------------------------Append Mode Transaction APIs-----------------------------------------------------------//
+esp_err_t spi_slave_hd_append_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, TickType_t timeout)
+{
+    esp_err_t err;
+    spi_slave_hd_slot_t *host = spihost[host_id];
+    spi_slave_hd_hal_context_t *hal = &host->hal;
+
+    SPIHD_CHECK(trans->len <= SPI_MAX_DMA_LEN, "Currently we only support transaction with data length within 4092 bytes", ESP_ERR_INVALID_ARG);
+    SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
+    SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
+    SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
+    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
+
+    if (chan == SPI_SLAVE_CHAN_TX) {
+        BaseType_t ret = xSemaphoreTake(host->tx_cnting_sem, timeout);
+        if (ret == pdFALSE) {
+            return ESP_ERR_TIMEOUT;
+        }
+        portENTER_CRITICAL(&host->int_spinlock);
+        err = spi_slave_hd_hal_txdma_append(hal, trans->data, trans->len, trans);
+        portEXIT_CRITICAL(&host->int_spinlock);
+    } else {
+        BaseType_t ret = xSemaphoreTake(host->rx_cnting_sem, timeout);
+        if (ret == pdFALSE) {
+            return ESP_ERR_TIMEOUT;
+        }
+        portENTER_CRITICAL(&host->int_spinlock);
+        err = spi_slave_hd_hal_rxdma_append(hal, trans->data, trans->len, trans);
+        portEXIT_CRITICAL(&host->int_spinlock);
+    }
+    if (err != ESP_OK) {
+        ESP_LOGE(TAG, "Wait until the DMA finishes its transaction");
+    }
+
+    return err;
+}
+
+esp_err_t spi_slave_hd_get_append_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
+{
+    esp_err_t ret;
+    spi_slave_hd_slot_t* host = spihost[host_id];
+
+    SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
+    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
+    ret = get_ret_queue_result(host_id, chan, out_trans, timeout);
+
+    return ret;
+}
+#endif //#if CONFIG_IDF_TARGET_ESP32S2

+ 8 - 5
components/driver/test/test_spi_slave_hd.c

@@ -21,7 +21,7 @@
 #include "unity.h"
 #include "test/test_common_spi.h"
 
-#define TEST_DMA_MAX_SIZE    14000
+#define TEST_DMA_MAX_SIZE    4092
 #define TEST_BUFFER_SIZE 256     ///< buffer size of each wrdma buffer in fifo mode
 #define TEST_SEG_SIZE   25
 
@@ -92,7 +92,7 @@ void config_single_board_test_pin(void)
 static void init_master_hd(spi_device_handle_t* spi, const spitest_param_set_t* config, int freq)
 {
     spi_bus_config_t bus_cfg = SPI_BUS_TEST_DEFAULT_CONFIG();
-    bus_cfg.max_transfer_sz = TEST_DMA_MAX_SIZE;
+    bus_cfg.max_transfer_sz = TEST_DMA_MAX_SIZE*30;
     bus_cfg.quadhd_io_num = PIN_NUM_HD;
     bus_cfg.quadwp_io_num = PIN_NUM_WP;
 #if defined(TEST_MASTER_GPIO_MATRIX) && CONFIG_IDF_TARGET_ESP32S2
@@ -111,7 +111,7 @@ static void init_master_hd(spi_device_handle_t* spi, const spitest_param_set_t*
     TEST_ESP_OK(spi_bus_add_device(TEST_SPI_HOST, &dev_cfg, spi));
 }
 
-static void init_slave_hd(int mode, const spi_slave_hd_callback_config_t* callback)
+static void init_slave_hd(int mode, bool append_mode, const spi_slave_hd_callback_config_t* callback)
 {
     spi_bus_config_t bus_cfg = SPI_BUS_TEST_DEFAULT_CONFIG();
     bus_cfg.max_transfer_sz = TEST_DMA_MAX_SIZE*30;
@@ -123,6 +123,9 @@ static void init_slave_hd(int mode, const spi_slave_hd_callback_config_t* callba
     spi_slave_hd_slot_config_t slave_hd_cfg = SPI_SLOT_TEST_DEFAULT_CONFIG();
     slave_hd_cfg.mode = mode;
     slave_hd_cfg.dma_chan = TEST_SLAVE_HOST;
+    if (append_mode) {
+        slave_hd_cfg.flags |= SPI_SLAVE_HD_APPEND_MODE;
+    }
     if (callback) {
         slave_hd_cfg.cb_config = *callback;
     } else {
@@ -211,7 +214,7 @@ static void test_hd_start(spi_device_handle_t *spi, int freq, const spitest_para
         .cb_buffer_tx = rdbuf_cb,
         .arg = ctx,
     };
-    init_slave_hd(cfg->mode, &callback);
+    init_slave_hd(cfg->mode, 0, &callback);
 
     //when test with single board via same set of mosi, miso, clk and cs pins.
     config_single_board_test_pin();
@@ -503,7 +506,7 @@ TEST_CASE("test spi slave hd segment mode, master too long", "[spi][spi_slv_hd]"
     init_master_hd(&spi, cfg, freq);
 
     //no callback needed
-    init_slave_hd(cfg->mode, NULL);
+    init_slave_hd(cfg->mode, 0, NULL);
 
     //Use GPIO matrix to connect signal of master and slave via same set of pins on one board.
     config_single_board_test_pin();

+ 2 - 2
components/hal/esp32c3/include/hal/spi_ll.h

@@ -66,7 +66,7 @@ typedef enum {
     SPI_LL_INTR_WRBUF =         BIT(7),     ///< Has received WRBUF command. Only available in slave HD.
     SPI_LL_INTR_RDDMA =         BIT(8),     ///< Has received RDDMA command. Only available in slave HD.
     SPI_LL_INTR_WRDMA =         BIT(9),     ///< Has received WRDMA command. Only available in slave HD.
-    SPI_LL_INTR_WR_DONE =       BIT(10),    ///< Has received WR_DONE command. Only available in slave HD.
+    SPI_LL_INTR_CMD7  =         BIT(10),    ///< Has received CMD7 command. Only available in slave HD.
     SPI_LL_INTR_CMD8 =          BIT(11),    ///< Has received CMD8 command. Only available in slave HD.
     SPI_LL_INTR_CMD9 =          BIT(12),    ///< Has received CMD9 command. Only available in slave HD.
     SPI_LL_INTR_CMDA =          BIT(13),    ///< Has received CMDA command. Only available in slave HD.
@@ -976,7 +976,7 @@ static inline uint32_t spi_ll_slave_get_rcv_bitlen(spi_dev_t *hw)
     item(SPI_LL_INTR_RDDMA,         dma_int_ena.rd_dma_done,        dma_int_raw.rd_dma_done,        dma_int_clr.rd_dma_done=1) \
     item(SPI_LL_INTR_WRDMA,         dma_int_ena.wr_dma_done,        dma_int_raw.wr_dma_done,        dma_int_clr.wr_dma_done=1) \
     item(SPI_LL_INTR_SEG_DONE,      dma_int_ena.dma_seg_trans_done, dma_int_raw.dma_seg_trans_done, dma_int_clr.dma_seg_trans_done=1) \
-    item(SPI_LL_INTR_WR_DONE,       dma_int_ena.cmd7,               dma_int_raw.cmd7,               dma_int_clr.cmd7=1) \
+    item(SPI_LL_INTR_CMD7,          dma_int_ena.cmd7,               dma_int_raw.cmd7,               dma_int_clr.cmd7=1) \
     item(SPI_LL_INTR_CMD8,          dma_int_ena.cmd8,               dma_int_raw.cmd8,               dma_int_clr.cmd8=1) \
     item(SPI_LL_INTR_CMD9,          dma_int_ena.cmd9,               dma_int_raw.cmd9,               dma_int_clr.cmd9=1) \
     item(SPI_LL_INTR_CMDA,          dma_int_ena.cmda,               dma_int_raw.cmda,               dma_int_clr.cmda=1)

+ 17 - 17
components/hal/esp32s2/include/hal/spi_ll.h

@@ -75,7 +75,7 @@ typedef enum {
     SPI_LL_INTR_WRBUF =         BIT(7),     ///< Has received WRBUF command. Only available in slave HD.
     SPI_LL_INTR_RDDMA =         BIT(8),     ///< Has received RDDMA command. Only available in slave HD.
     SPI_LL_INTR_WRDMA =         BIT(9),     ///< Has received WRDMA command. Only available in slave HD.
-    SPI_LL_INTR_WR_DONE =       BIT(10),    ///< Has received WR_DONE command. Only available in slave HD.
+    SPI_LL_INTR_CMD7 =          BIT(10),    ///< Has received CMD7 command. Only available in slave HD.
     SPI_LL_INTR_CMD8 =          BIT(11),    ///< Has received CMD8 command. Only available in slave HD.
     SPI_LL_INTR_CMD9 =          BIT(12),    ///< Has received CMD9 command. Only available in slave HD.
     SPI_LL_INTR_CMDA =          BIT(13),    ///< Has received CMDA command. Only available in slave HD.
@@ -150,7 +150,7 @@ static inline void spi_ll_slave_hd_init(spi_dev_t *hw)
     hw->slave.soft_reset = 1;
     hw->slave.soft_reset = 0;
 
-    hw->user.doutdin = 0; //we only support full duplex
+    hw->user.doutdin = 0; //we only support half duplex
     hw->slave.slave_mode = 1;
 }
 
@@ -955,21 +955,21 @@ static inline uint32_t spi_ll_slave_get_rcv_bitlen(spi_dev_t *hw)
 //helper macros to generate code for each interrupts
 #define FOR_EACH_ITEM(op, list) do { list(op) } while(0)
 #define INTR_LIST(item)    \
-    item(SPI_LL_INTR_TRANS_DONE,    slave.int_trans_done_en,    slave.trans_done,           slave.trans_done=0) \
-    item(SPI_LL_INTR_RDBUF,         slave.int_rd_buf_done_en,   slv_rdbuf_dlen.rd_buf_done, slv_rdbuf_dlen.rd_buf_done=0) \
-    item(SPI_LL_INTR_WRBUF,         slave.int_wr_buf_done_en,   slv_wrbuf_dlen.wr_buf_done, slv_wrbuf_dlen.wr_buf_done=0) \
-    item(SPI_LL_INTR_RDDMA,         slave.int_rd_dma_done_en,   slv_rd_byte.rd_dma_done,    slv_rd_byte.rd_dma_done=0) \
-    item(SPI_LL_INTR_WRDMA,         slave.int_wr_dma_done_en,   slave1.wr_dma_done,         slave1.wr_dma_done=0) \
-    item(SPI_LL_INTR_IN_SUC_EOF,    dma_int_ena.in_suc_eof,     dma_int_raw.in_suc_eof,     dma_int_clr.in_suc_eof=1) \
-    item(SPI_LL_INTR_OUT_EOF,       dma_int_ena.out_eof,        dma_int_raw.out_eof,        dma_int_clr.out_eof=1) \
-    item(SPI_LL_INTR_OUT_TOTAL_EOF, dma_int_ena.out_total_eof,  dma_int_raw.out_total_eof,  dma_int_clr.out_total_eof=1) \
-    item(SPI_LL_INTR_SEG_DONE,      slave.int_dma_seg_trans_en, hold.dma_seg_trans_done,    hold.dma_seg_trans_done=0) \
-    item(SPI_LL_INTR_IN_FULL,       dma_int_ena.infifo_full_err, dma_int_raw.infifo_full_err, dma_int_clr.infifo_full_err=1) \
-    item(SPI_LL_INTR_OUT_EMPTY,     dma_int_ena.outfifo_empty_err, dma_int_raw.outfifo_empty_err,  dma_int_clr.outfifo_empty_err=1) \
-    item(SPI_LL_INTR_WR_DONE,       dma_int_ena.cmd7, dma_int_raw.cmd7,  dma_int_clr.cmd7=1) \
-    item(SPI_LL_INTR_CMD8,          dma_int_ena.cmd8, dma_int_raw.cmd8,  dma_int_clr.cmd8=1) \
-    item(SPI_LL_INTR_CMD9,          dma_int_ena.cmd9, dma_int_raw.cmd9,  dma_int_clr.cmd9=1) \
-    item(SPI_LL_INTR_CMDA,          dma_int_ena.cmda, dma_int_raw.cmda,  dma_int_clr.cmda=1)
+    item(SPI_LL_INTR_TRANS_DONE,    slave.int_trans_done_en,        slave.trans_done,               slave.trans_done=0) \
+    item(SPI_LL_INTR_RDBUF,         slave.int_rd_buf_done_en,       slv_rdbuf_dlen.rd_buf_done,     slv_rdbuf_dlen.rd_buf_done=0) \
+    item(SPI_LL_INTR_WRBUF,         slave.int_wr_buf_done_en,       slv_wrbuf_dlen.wr_buf_done,     slv_wrbuf_dlen.wr_buf_done=0) \
+    item(SPI_LL_INTR_RDDMA,         slave.int_rd_dma_done_en,       slv_rd_byte.rd_dma_done,        slv_rd_byte.rd_dma_done=0) \
+    item(SPI_LL_INTR_WRDMA,         slave.int_wr_dma_done_en,       slave1.wr_dma_done,             slave1.wr_dma_done=0) \
+    item(SPI_LL_INTR_IN_SUC_EOF,    dma_int_ena.in_suc_eof,         dma_int_raw.in_suc_eof,         dma_int_clr.in_suc_eof=1) \
+    item(SPI_LL_INTR_OUT_EOF,       dma_int_ena.out_eof,            dma_int_raw.out_eof,            dma_int_clr.out_eof=1) \
+    item(SPI_LL_INTR_OUT_TOTAL_EOF, dma_int_ena.out_total_eof,      dma_int_raw.out_total_eof,      dma_int_clr.out_total_eof=1) \
+    item(SPI_LL_INTR_SEG_DONE,      slave.int_dma_seg_trans_en,     hold.dma_seg_trans_done,        hold.dma_seg_trans_done=0) \
+    item(SPI_LL_INTR_IN_FULL,       dma_int_ena.infifo_full_err,    dma_int_raw.infifo_full_err,    dma_int_clr.infifo_full_err=1) \
+    item(SPI_LL_INTR_OUT_EMPTY,     dma_int_ena.outfifo_empty_err,  dma_int_raw.outfifo_empty_err,  dma_int_clr.outfifo_empty_err=1) \
+    item(SPI_LL_INTR_CMD7,          dma_int_ena.cmd7,               dma_int_raw.cmd7,               dma_int_clr.cmd7=1) \
+    item(SPI_LL_INTR_CMD8,          dma_int_ena.cmd8,               dma_int_raw.cmd8,               dma_int_clr.cmd8=1) \
+    item(SPI_LL_INTR_CMD9,          dma_int_ena.cmd9,               dma_int_raw.cmd9,               dma_int_clr.cmd9=1) \
+    item(SPI_LL_INTR_CMDA,          dma_int_ena.cmda,               dma_int_raw.cmda,               dma_int_clr.cmda=1)
 
 
 static inline void spi_ll_enable_intr(spi_dev_t* hw, spi_ll_intr_t intr_mask)

+ 2 - 2
components/hal/esp32s3/include/hal/spi_ll.h

@@ -66,7 +66,7 @@ typedef enum {
     SPI_LL_INTR_WRBUF =         BIT(7),     ///< Has received WRBUF command. Only available in slave HD.
     SPI_LL_INTR_RDDMA =         BIT(8),     ///< Has received RDDMA command. Only available in slave HD.
     SPI_LL_INTR_WRDMA =         BIT(9),     ///< Has received WRDMA command. Only available in slave HD.
-    SPI_LL_INTR_WR_DONE =       BIT(10),    ///< Has received WR_DONE command. Only available in slave HD.
+    SPI_LL_INTR_CMD7 =          BIT(10),    ///< Has received CMD7 command. Only available in slave HD.
     SPI_LL_INTR_CMD8 =          BIT(11),    ///< Has received CMD8 command. Only available in slave HD.
     SPI_LL_INTR_CMD9 =          BIT(12),    ///< Has received CMD9 command. Only available in slave HD.
     SPI_LL_INTR_CMDA =          BIT(13),    ///< Has received CMDA command. Only available in slave HD.
@@ -985,7 +985,7 @@ static inline uint32_t spi_ll_slave_get_rcv_bitlen(spi_dev_t *hw)
     item(SPI_LL_INTR_RDDMA,         dma_int_ena.rd_dma_done,        dma_int_raw.rd_dma_done,        dma_int_clr.rd_dma_done=1) \
     item(SPI_LL_INTR_WRDMA,         dma_int_ena.wr_dma_done,        dma_int_raw.wr_dma_done,        dma_int_clr.wr_dma_done=1) \
     item(SPI_LL_INTR_SEG_DONE,      dma_int_ena.dma_seg_trans_done, dma_int_raw.dma_seg_trans_done, dma_int_clr.dma_seg_trans_done=1) \
-    item(SPI_LL_INTR_WR_DONE,       dma_int_ena.cmd7,               dma_int_raw.cmd7,               dma_int_clr.cmd7=1) \
+    item(SPI_LL_INTR_CMD7,          dma_int_ena.cmd7,               dma_int_raw.cmd7,               dma_int_clr.cmd7=1) \
     item(SPI_LL_INTR_CMD8,          dma_int_ena.cmd8,               dma_int_raw.cmd8,               dma_int_clr.cmd8=1) \
     item(SPI_LL_INTR_CMD9,          dma_int_ena.cmd9,               dma_int_raw.cmd9,               dma_int_clr.cmd9=1) \
     item(SPI_LL_INTR_CMDA,          dma_int_ena.cmda,               dma_int_raw.cmda,               dma_int_clr.cmda=1)

+ 115 - 27
components/hal/include/hal/spi_slave_hd_hal.h

@@ -19,9 +19,9 @@
  ******************************************************************************/
 
 /*
- * The HAL layer for SPI Slave HD mode, currently only segment mode is supported
+ * The HAL layer for SPI Slave HD mode.
  *
- * Usage:
+ * Usage (segment mode):
  * - Firstly, initialize the slave with `spi_slave_hd_hal_init`
  *
  * - Event handling:
@@ -40,7 +40,7 @@
  * - RXDMA:
  *     - To receive data through DMA, call `spi_slave_hd_hal_rxdma`
  *     - When the operation is done, SPI_EV_RECV will be triggered.
- *     - Call ``spi_slave_hd_hal_rxdma_get_len`` to get the received length
+ *     - Call ``spi_slave_hd_hal_rxdma_seg_get_len`` to get the received length
  *
  *  - Shared buffer:
  *      - Call ``spi_slave_hd_hal_write_buffer`` to write the shared register buffer. When the buffer is
@@ -56,40 +56,60 @@
 #include "hal/spi_ll.h"
 #include "hal/spi_types.h"
 
+/**
+ * @brief Type of DMA descriptor with appended members
+ *        This structure inherits the DMA descriptor, with a pointer to the transaction descriptor passed in by the user.
+ */
+typedef struct {
+    lldesc_t      desc;                             ///< DMA descriptor
+    void          *arg;                             ///< This points to the transaction descriptor the user passed in
+} spi_slave_hd_hal_desc_append_t;
+
 /// Configuration of the HAL
 typedef struct {
-    uint32_t      host_id;              ///< Host ID of the spi peripheral
-    spi_dma_dev_t *dma_in;              ///< Input  DMA(DMA -> RAM) peripheral register address
-    spi_dma_dev_t *dma_out;             ///< Output DMA(RAM -> DMA) peripheral register address
-    uint32_t      spics_io_num;         ///< CS GPIO pin for this device
-    uint8_t       mode;                 ///< SPI mode (0-3)
-    uint32_t      command_bits;         ///< command field bits, multiples of 8 and at least 8.
-    uint32_t      address_bits;         ///< address field bits, multiples of 8 and at least 8.
-    uint32_t      dummy_bits;           ///< dummy field bits, multiples of 8 and at least 8.
+    uint32_t      host_id;                          ///< Host ID of the spi peripheral
+    spi_dma_dev_t *dma_in;                          ///< Input  DMA(DMA -> RAM) peripheral register address
+    spi_dma_dev_t *dma_out;                         ///< Output DMA(RAM -> DMA) peripheral register address
+    uint32_t      dma_chan;                         ///< The dma channel used.
+    bool          append_mode;                      ///< True for DMA append mode, false for segment mode
+    uint32_t      spics_io_num;                     ///< CS GPIO pin for this device
+    uint8_t       mode;                             ///< SPI mode (0-3)
+    uint32_t      command_bits;                     ///< command field bits, multiples of 8 and at least 8.
+    uint32_t      address_bits;                     ///< address field bits, multiples of 8 and at least 8.
+    uint32_t      dummy_bits;                       ///< dummy field bits, multiples of 8 and at least 8.
 
     struct {
-        uint32_t  tx_lsbfirst : 1;      ///< Whether TX data should be sent with LSB first.
-        uint32_t  rx_lsbfirst : 1;      ///< Whether RX data should be read with LSB first.
+        uint32_t  tx_lsbfirst : 1;                  ///< Whether TX data should be sent with LSB first.
+        uint32_t  rx_lsbfirst : 1;                  ///< Whether RX data should be read with LSB first.
     };
-    uint32_t      dma_chan;             ///< The dma channel used.
 } spi_slave_hd_hal_config_t;
 
 /// Context of the HAL, initialized by :cpp:func:`spi_slave_hd_hal_init`.
 typedef struct {
-    spi_dev_t     *dev;                 ///< Beginning address of the peripheral registers.
-    spi_dma_dev_t *dma_in;              ///< Address of the DMA peripheral registers which stores the data received from a peripheral into RAM.
-    spi_dma_dev_t *dma_out;             ///< Address of the DMA peripheral registers which transmits the data from RAM to a peripheral.
-    lldesc_t      *dmadesc_tx;          /**< Array of DMA descriptor used by the TX DMA.
-                                         *  The amount should be larger than dmadesc_n. The driver should ensure that
-                                         *  the data to be sent is shorter than the descriptors can hold.
-                                         */
-    lldesc_t      *dmadesc_rx;          /**< Array of DMA descriptor used by the RX DMA.
-                                         *  The amount should be larger than dmadesc_n. The driver should ensure that
-                                         *  the data to be sent is shorter than the descriptors can hold.
-                                         */
+    /* These two need to be malloced by the driver first */
+    spi_slave_hd_hal_desc_append_t  *dmadesc_tx;            ///< Head of the TX DMA descriptors.
+    spi_slave_hd_hal_desc_append_t  *dmadesc_rx;            ///< Head of the RX DMA descriptors.
+
+    /* address of the hardware */
+    spi_dev_t                       *dev;                   ///< Beginning address of the peripheral registers.
+    spi_dma_dev_t                   *dma_in;                ///< Address of the DMA peripheral registers which stores the data received from a peripheral into RAM.
+    spi_dma_dev_t                   *dma_out;               ///< Address of the DMA peripheral registers which transmits the data from RAM to a peripheral.
+
+    bool                            append_mode;            ///< True for DMA append mode, false for segment mode
+    uint32_t                        dma_desc_num;           ///< Number of the available DMA descriptors. Calculated from ``bus_max_transfer_size``.
+    spi_slave_hd_hal_desc_append_t  *tx_cur_desc;           ///< Current TX DMA descriptor that could be linked (set up).
+    spi_slave_hd_hal_desc_append_t  *tx_dma_head;           ///< Head of the linked TX DMA descriptors which are not used by hardware
+    spi_slave_hd_hal_desc_append_t  *tx_dma_tail;           ///< Tail of the linked TX DMA descriptors which are not used by hardware
+    uint32_t                        tx_used_desc_cnt;       ///< Number of the TX descriptors that have been setup
+    uint32_t                        tx_recycled_desc_cnt;   ///< Number of the TX descriptors that could be recycled
+    spi_slave_hd_hal_desc_append_t  *rx_cur_desc;           ///< Current RX DMA descriptor that could be linked (set up).
+    spi_slave_hd_hal_desc_append_t  *rx_dma_head;           ///< Head of the linked RX DMA descriptors which are not used by hardware
+    spi_slave_hd_hal_desc_append_t  *rx_dma_tail;           ///< Tail of the linked RX DMA descriptors which are not used by hardware
+    uint32_t                        rx_used_desc_cnt;       ///< Number of the RX descriptors that have been setup
+    uint32_t                        rx_recycled_desc_cnt;   ///< Number of the RX descriptors that could be recycled
 
     /* Internal status used by the HAL implementation, initialized as 0. */
-    uint32_t      intr_not_triggered;
+    uint32_t                        intr_not_triggered;
 } spi_slave_hd_hal_context_t;
 
 /**
@@ -100,6 +120,23 @@ typedef struct {
  */
 void spi_slave_hd_hal_init(spi_slave_hd_hal_context_t *hal, const spi_slave_hd_hal_config_t *hal_config);
 
+/**
+ * @brief Get the total size needed for all the DMA descriptors
+ *
+ * @param hal       Context of the HAL layer
+ * @param bus_size  SPI bus maximum transfer size, in bytes.
+ * @return          Total size needed for all the DMA descriptors
+ */
+uint32_t spi_slave_hd_hal_get_total_desc_size(spi_slave_hd_hal_context_t *hal, uint32_t bus_size);
+
+/**
+ * @brief Get the actual bus size
+ *
+ * @param hal       Context of the HAL layer
+ * @return          Actual bus transaction size
+ */
+uint32_t spi_salve_hd_hal_get_max_bus_size(spi_slave_hd_hal_context_t *hal);
+
 /**
  * @brief Check and clear signal of one event
  *
@@ -160,7 +197,7 @@ void spi_slave_hd_hal_rxdma(spi_slave_hd_hal_context_t *hal, uint8_t *out_buf, s
  * @param hal       Context of the HAL layer
  * @return          The received length
  */
-int spi_slave_hd_hal_rxdma_get_len(spi_slave_hd_hal_context_t *hal);
+int spi_slave_hd_hal_rxdma_seg_get_len(spi_slave_hd_hal_context_t *hal);
 
 ////////////////////////////////////////////////////////////////////////////////
 // TX DMA
@@ -212,3 +249,54 @@ int spi_slave_hd_hal_get_rxlen(spi_slave_hd_hal_context_t *hal);
  * @return          The address of last transaction
  */
 int spi_slave_hd_hal_get_last_addr(spi_slave_hd_hal_context_t *hal);
+
+#if CONFIG_IDF_TARGET_ESP32S2
+//Append mode is only supported on ESP32S2 now
+////////////////////////////////////////////////////////////////////////////////
+// Append Mode
+////////////////////////////////////////////////////////////////////////////////
+/**
+ * @brief Return the finished TX transaction
+ *
+ * @param hal            Context of the HAL layer
+ * @param out_trans      Pointer to the caller-defined transaction
+ * @return               1: Transaction is finished; 0: Transaction is not finished
+ */
+bool spi_slave_hd_hal_get_tx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans);
+
+/**
+ * @brief Return the finished RX transaction
+ *
+ * @param hal            Context of the HAL layer
+ * @param out_trans      Pointer to the caller-defined transaction
+ * @param out_len        Actual number of bytes of received data
+ * @return               1: Transaction is finished; 0: Transaction is not finished
+ */
+bool spi_slave_hd_hal_get_rx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans, size_t *out_len);
+
+/**
+ * @brief Load the TX DMA descriptors without stopping the DMA
+ *
+ * @param hal            Context of the HAL layer
+ * @param data           Buffer of the transaction data
+ * @param len            Length of the data
+ * @param arg            Pointer used by the caller to indicate the transaction. Will be returned by ``spi_slave_hd_hal_get_tx_finished_trans`` when the transaction is finished
+ * @return
+ *        - ESP_OK: on success
+ *        - ESP_ERR_INVALID_STATE: Function called in invalid state.
+ */
+esp_err_t spi_slave_hd_hal_txdma_append(spi_slave_hd_hal_context_t *hal, uint8_t *data, size_t len, void *arg);
+
+/**
+ * @brief Load the RX DMA descriptors without stopping the DMA
+ *
+ * @param hal            Context of the HAL layer
+ * @param data           Buffer of the transaction data
+ * @param len            Length of the data
+ * @param arg            Pointer used by the caller to indicate the transaction. Will be returned by ``spi_slave_hd_hal_get_rx_finished_trans`` when the transaction is finished
+ * @return
+ *        - ESP_OK: on success
+ *        - ESP_ERR_INVALID_STATE: Function called in invalid state.
+ */
+esp_err_t spi_slave_hd_hal_rxdma_append(spi_slave_hd_hal_context_t *hal, uint8_t *data, size_t len, void *arg);
+#endif  //#if CONFIG_IDF_TARGET_ESP32S2
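
The append-mode HAL functions declared above follow a load/reap pattern, which ``spi_slave_hd_intr_append()`` in spi_slave_hd.c relies on. A minimal sketch of the RX side, assuming the ``hal`` context has already been set up by the driver and using placeholder buffer sizes:

#include <stddef.h>
#include "esp_err.h"
#include "hal/spi_slave_hd_hal.h"

// Buffers must live in DMA-capable memory; static internal RAM is used here for brevity.
static uint8_t s_rx_buf[2][128];

static void example_hal_append_rx(spi_slave_hd_hal_context_t *hal)
{
    // Load (append) two RX descriptors without stopping the DMA. The last argument
    // is an opaque tag returned later by spi_slave_hd_hal_get_rx_finished_trans().
    if (spi_slave_hd_hal_rxdma_append(hal, s_rx_buf[0], sizeof(s_rx_buf[0]), (void *)0) != ESP_OK ||
        spi_slave_hd_hal_rxdma_append(hal, s_rx_buf[1], sizeof(s_rx_buf[1]), (void *)1) != ESP_OK) {
        // No free descriptor at the moment; try again after some have been reaped.
        return;
    }

    // Later, typically on SPI_EV_RECV, reap every descriptor the hardware has finished.
    void *tag;
    size_t recv_len;
    while (spi_slave_hd_hal_get_rx_finished_trans(hal, &tag, &recv_len)) {
        // `tag` identifies the finished descriptor, `recv_len` is the received byte count.
    }
}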

+ 7 - 7
components/hal/include/hal/spi_types.h

@@ -31,13 +31,13 @@ typedef enum {
 
 /// SPI Events
 typedef enum {
-    SPI_EV_BUF_TX = BIT(0), ///< The buffer has sent data to master, Slave HD only
-    SPI_EV_BUF_RX = BIT(1), ///< The buffer has received data from master, Slave HD only
-    SPI_EV_SEND =   BIT(2), ///< Has sent data to master through RDDMA, Slave HD only
-    SPI_EV_RECV =   BIT(3), ///< Has received data from master through WRDMA, Slave HD only
-    SPI_EV_CMD9 =   BIT(4), ///< Received CMD9 from master, Slave HD only
-    SPI_EV_CMDA =   BIT(5), ///< Received CMDA from master, Slave HD only
-    SPI_EV_TRANS =  BIT(6), ///< A transaction has done
+    SPI_EV_BUF_TX        = BIT(0), ///< The buffer has sent data to master, Slave HD only
+    SPI_EV_BUF_RX        = BIT(1), ///< The buffer has received data from master, Slave HD only
+    SPI_EV_SEND          = BIT(2), ///< Slave has loaded data into the DMA and the master has received a certain amount of it; the amount is determined by the master. Slave HD only
+    SPI_EV_RECV          = BIT(3), ///< Slave has received a certain amount of data from the master; the amount is determined by the master. Slave HD only.
+    SPI_EV_CMD9          = BIT(4), ///< Received CMD9 from master, Slave HD only
+    SPI_EV_CMDA          = BIT(5), ///< Received CMDA from master, Slave HD only
+    SPI_EV_TRANS         = BIT(6), ///< A transaction has completed
 } spi_event_t;
 FLAG_ATTR(spi_event_t)
 

+ 2 - 2
components/hal/spi_hal.c

@@ -26,8 +26,8 @@
 #define spi_dma_ll_tx_enable_burst_data(dev, enable)         gdma_ll_tx_enable_data_burst(&GDMA, SOC_GDMA_SPI2_DMA_CHANNEL, enable);
 #define spi_dma_ll_rx_enable_burst_desc(dev, enable)         gdma_ll_rx_enable_descriptor_burst(&GDMA, SOC_GDMA_SPI2_DMA_CHANNEL, enable);
 #define spi_dma_ll_tx_enable_burst_desc(dev, enable)         gdma_ll_tx_enable_descriptor_burst(&GDMA, SOC_GDMA_SPI2_DMA_CHANNEL, enable);
-#define spi_dma_enable_out_auto_wrback(dev, enable)          gdma_ll_tx_enable_auto_write_back(&GDMA, SOC_GDMA_SPI2_DMA_CHANNEL, enable);
-#define spi_dma_set_out_eof_generation(dev, enable)          gdma_ll_tx_set_eof_mode(&GDMA, SOC_GDMA_SPI2_DMA_CHANNEL, enable);
+#define spi_dma_ll_enable_out_auto_wrback(dev, enable)          gdma_ll_tx_enable_auto_write_back(&GDMA, SOC_GDMA_SPI2_DMA_CHANNEL, enable);
+#define spi_dma_ll_set_out_eof_generation(dev, enable)          gdma_ll_tx_set_eof_mode(&GDMA, SOC_GDMA_SPI2_DMA_CHANNEL, enable);
 #endif
 
 static const char SPI_HAL_TAG[] = "spi_hal";

+ 2 - 2
components/hal/spi_slave_hal.c

@@ -11,8 +11,8 @@
 #define spi_dma_ll_tx_enable_burst_data(dev, enable)         gdma_ll_tx_enable_data_burst(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
 #define spi_dma_ll_rx_enable_burst_desc(dev, enable)         gdma_ll_rx_enable_descriptor_burst(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
 #define spi_dma_ll_tx_enable_burst_desc(dev, enable)         gdma_ll_tx_enable_descriptor_burst(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
-#define spi_dma_enable_out_auto_wrback(dev, enable)          gdma_ll_tx_enable_auto_write_back(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
-#define spi_dma_set_out_eof_generation(dev, enable)          gdma_ll_tx_set_eof_mode(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
+#define spi_dma_ll_enable_out_auto_wrback(dev, enable)          gdma_ll_tx_enable_auto_write_back(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
+#define spi_dma_ll_set_out_eof_generation(dev, enable)          gdma_ll_tx_set_eof_mode(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
 #endif
 
 static void s_spi_slave_hal_dma_init_config(const spi_slave_hal_context_t *hal)

+ 216 - 38
components/hal/spi_slave_hd_hal.c

@@ -35,8 +35,8 @@
 #define spi_dma_ll_tx_enable_burst_data(dev, enable)         gdma_ll_tx_enable_data_burst(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
 #define spi_dma_ll_rx_enable_burst_desc(dev, enable)         gdma_ll_rx_enable_descriptor_burst(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
 #define spi_dma_ll_tx_enable_burst_desc(dev, enable)         gdma_ll_tx_enable_descriptor_burst(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
-#define spi_dma_enable_out_auto_wrback(dev, enable)          gdma_ll_tx_enable_auto_write_back(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
-#define spi_dma_set_out_eof_generation(dev, enable)          gdma_ll_tx_set_eof_mode(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
+#define spi_dma_ll_enable_out_auto_wrback(dev, enable)          gdma_ll_tx_enable_auto_write_back(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
+#define spi_dma_ll_set_out_eof_generation(dev, enable)          gdma_ll_tx_set_eof_mode(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, enable);
 #define spi_dma_ll_rx_start(dev, addr) do {\
             gdma_ll_rx_set_desc_addr(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL, (uint32_t)addr);\
             gdma_ll_rx_start(&GDMA, SOC_GDMA_SPI3_DMA_CHANNEL);\
@@ -53,15 +53,18 @@ static void s_spi_slave_hd_hal_dma_init_config(const spi_slave_hd_hal_context_t
     spi_dma_ll_tx_enable_burst_data(hal->dma_out, 1);
     spi_dma_ll_rx_enable_burst_desc(hal->dma_in, 1);
     spi_dma_ll_tx_enable_burst_desc(hal->dma_out, 1);
+    spi_dma_ll_enable_out_auto_wrback(hal->dma_out, 1);
 }
 
 void spi_slave_hd_hal_init(spi_slave_hd_hal_context_t *hal, const spi_slave_hd_hal_config_t *hal_config)
 {
-    memset(hal, 0, sizeof(spi_slave_hd_hal_context_t));
     spi_dev_t* hw = SPI_LL_GET_HW(hal_config->host_id);
     hal->dev = hw;
     hal->dma_in = hal_config->dma_in;
     hal->dma_out = hal_config->dma_out;
+    hal->append_mode = hal_config->append_mode;
+    hal->rx_cur_desc = hal->dmadesc_rx;
+    hal->tx_cur_desc = hal->dmadesc_tx;
 
     //Configure slave
     s_spi_slave_hd_hal_dma_init_config(hal);
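
For context (not part of the patch), a minimal sketch of enabling the new append mode through this init path; only the config fields visible in this diff are touched, and the remaining members of `spi_slave_hd_hal_config_t` (host_id, dma_in, dma_out, ...) are assumed to be filled by the caller.

#include "hal/spi_slave_hd_hal.h"   // assumed header for the HAL types used below

// Bring up the slave HD HAL with the new append (segment-restart) mode enabled.
static void example_init_append(spi_slave_hd_hal_context_t *hal,
                                spi_slave_hd_hal_config_t *cfg)
{
    cfg->append_mode = 1;             // selects the CMD7/OUT_EOF interrupt set configured below
    spi_slave_hd_hal_init(hal, cfg);  // host_id, dma_in and dma_out must already be set in cfg
}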
@@ -76,23 +79,30 @@ void spi_slave_hd_hal_init(spi_slave_hd_hal_context_t *hal, const spi_slave_hd_h
 
     spi_ll_disable_intr(hw, UINT32_MAX);
     spi_ll_clear_intr(hw, UINT32_MAX);
+    if (!hal_config->append_mode) {
+        spi_ll_set_intr(hw, SPI_LL_INTR_CMD7 | SPI_LL_INTR_CMD8);
 
-    spi_ll_set_intr(hw, SPI_LL_INTR_WR_DONE | SPI_LL_INTR_CMD8);
+        bool workaround_required = false;
+        if (!spi_ll_get_intr(hw, SPI_LL_INTR_CMD7)) {
+            hal->intr_not_triggered |= SPI_EV_RECV;
+            workaround_required = true;
+        }
+        if (!spi_ll_get_intr(hw, SPI_LL_INTR_CMD8)) {
+            hal->intr_not_triggered |= SPI_EV_SEND;
+            workaround_required = true;
+        }
 
-    bool workaround_required = false;
-    if (!spi_ll_get_intr(hw, SPI_LL_INTR_WR_DONE)) {
-        hal->intr_not_triggered |= SPI_EV_RECV;
-        workaround_required = true;
-    }
-    if (!spi_ll_get_intr(hw, SPI_LL_INTR_CMD8)) {
-        hal->intr_not_triggered |= SPI_EV_SEND;
-        workaround_required = true;
+        if (workaround_required) {
+            //Workaround if the previous interrupts are not writable
+            spi_ll_set_intr(hw, SPI_LL_INTR_TRANS_DONE);
+        }
     }
-
-    if (workaround_required) {
-        //Workaround if the previous interrupts are not writable
-        spi_ll_set_intr(hw, SPI_LL_INTR_TRANS_DONE);
+#if CONFIG_IDF_TARGET_ESP32S2
+    //Append mode is only supported on ESP32S2 now
+    else {
+        spi_ll_enable_intr(hw, SPI_LL_INTR_OUT_EOF | SPI_LL_INTR_CMD7);
     }
+#endif
 
     spi_ll_slave_hd_set_len_cond(hw,    SPI_LL_TRANS_LEN_COND_WRBUF |
                                         SPI_LL_TRANS_LEN_COND_WRDMA |
@@ -102,24 +112,41 @@ void spi_slave_hd_hal_init(spi_slave_hd_hal_context_t *hal, const spi_slave_hd_h
     spi_ll_slave_set_seg_mode(hal->dev, true);
 }
 
+uint32_t spi_salve_hd_hal_get_max_bus_size(spi_slave_hd_hal_context_t *hal)
+{
+    return hal->dma_desc_num * LLDESC_MAX_NUM_PER_DESC;
+}
+
+uint32_t spi_slave_hd_hal_get_total_desc_size(spi_slave_hd_hal_context_t *hal, uint32_t bus_size)
+{
+    //See how many dma descriptors we need
+    int dma_desc_ct = (bus_size + LLDESC_MAX_NUM_PER_DESC - 1) / LLDESC_MAX_NUM_PER_DESC;
+    if (dma_desc_ct == 0) {
+        dma_desc_ct = 1; //default to 4k when max is not given
+    }
+    hal->dma_desc_num = dma_desc_ct;
+
+    return hal->dma_desc_num * sizeof(spi_slave_hd_hal_desc_append_t);
+}
+
 void spi_slave_hd_hal_rxdma(spi_slave_hd_hal_context_t *hal, uint8_t *out_buf, size_t len)
 {
-    lldesc_setup_link(hal->dmadesc_rx, out_buf, len, true);
+    lldesc_setup_link(&hal->dmadesc_rx->desc, out_buf, len, true);
 
     spi_ll_dma_rx_fifo_reset(hal->dev);
     spi_dma_ll_rx_reset(hal->dma_in);
     spi_ll_slave_reset(hal->dev);
     spi_ll_infifo_full_clr(hal->dev);
-    spi_ll_clear_intr(hal->dev, SPI_LL_INTR_WR_DONE);
+    spi_ll_clear_intr(hal->dev, SPI_LL_INTR_CMD7);
 
     spi_ll_slave_set_rx_bitlen(hal->dev, len * 8);
     spi_ll_dma_rx_enable(hal->dev, 1);
-    spi_dma_ll_rx_start(hal->dma_in, &hal->dmadesc_rx[0]);
+    spi_dma_ll_rx_start(hal->dma_in, &hal->dmadesc_rx->desc);
 }
 
 void spi_slave_hd_hal_txdma(spi_slave_hd_hal_context_t *hal, uint8_t *data, size_t len)
 {
-    lldesc_setup_link(hal->dmadesc_tx, data, len, false);
+    lldesc_setup_link(&hal->dmadesc_tx->desc, data, len, false);
 
     spi_ll_dma_tx_fifo_reset(hal->dev);
     spi_dma_ll_tx_reset(hal->dma_out);
@@ -128,25 +155,29 @@ void spi_slave_hd_hal_txdma(spi_slave_hd_hal_context_t *hal, uint8_t *data, size
     spi_ll_clear_intr(hal->dev, SPI_LL_INTR_CMD8);
 
     spi_ll_dma_tx_enable(hal->dev, 1);
-    spi_dma_ll_tx_start(hal->dma_out, &hal->dmadesc_tx[0]);
+    spi_dma_ll_tx_start(hal->dma_out, &hal->dmadesc_tx->desc);
 }
 
-static spi_ll_intr_t get_event_intr(spi_event_t ev)
+static spi_ll_intr_t get_event_intr(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
 {
     spi_ll_intr_t intr = 0;
-    if (ev & SPI_EV_BUF_TX) intr |= SPI_LL_INTR_RDBUF;
-    if (ev & SPI_EV_BUF_RX) intr |= SPI_LL_INTR_WRBUF;
-    if (ev & SPI_EV_SEND) intr |= SPI_LL_INTR_CMD8;
-    if (ev & SPI_EV_RECV) intr |= SPI_LL_INTR_WR_DONE;
-    if (ev & SPI_EV_CMD9) intr |= SPI_LL_INTR_CMD9;
-    if (ev & SPI_EV_CMDA) intr |= SPI_LL_INTR_CMDA;
-    if (ev & SPI_EV_TRANS) intr |= SPI_LL_INTR_TRANS_DONE;
+#if CONFIG_IDF_TARGET_ESP32S2
+//Append mode is only supported on ESP32S2 now
+    if ((ev & SPI_EV_SEND) && hal->append_mode) intr |= SPI_LL_INTR_OUT_EOF;
+#endif
+    if ((ev & SPI_EV_SEND) && !hal->append_mode) intr |= SPI_LL_INTR_CMD8;
+    if (ev & SPI_EV_RECV)          intr |= SPI_LL_INTR_CMD7;
+    if (ev & SPI_EV_BUF_TX)        intr |= SPI_LL_INTR_RDBUF;
+    if (ev & SPI_EV_BUF_RX)        intr |= SPI_LL_INTR_WRBUF;
+    if (ev & SPI_EV_CMD9)          intr |= SPI_LL_INTR_CMD9;
+    if (ev & SPI_EV_CMDA)          intr |= SPI_LL_INTR_CMDA;
+    if (ev & SPI_EV_TRANS)         intr |= SPI_LL_INTR_TRANS_DONE;
     return intr;
 }
 
-bool spi_slave_hd_hal_check_clear_event(spi_slave_hd_hal_context_t* hal, spi_event_t ev)
+bool spi_slave_hd_hal_check_clear_event(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
 {
-    spi_ll_intr_t intr = get_event_intr(ev);
+    spi_ll_intr_t intr = get_event_intr(hal, ev);
     if (spi_ll_get_intr(hal->dev, intr)) {
         spi_ll_clear_intr(hal->dev, intr);
         return true;
@@ -154,15 +185,15 @@ bool spi_slave_hd_hal_check_clear_event(spi_slave_hd_hal_context_t* hal, spi_eve
     return false;
 }
 
-bool spi_slave_hd_hal_check_disable_event(spi_slave_hd_hal_context_t* hal, spi_event_t ev)
+bool spi_slave_hd_hal_check_disable_event(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
 {
     //The trans_done interrupt is used for the workaround when some interrupt is not writable
-    spi_ll_intr_t intr = get_event_intr(ev);
+    spi_ll_intr_t intr = get_event_intr(hal, ev);
 
     // Workaround for these interrupts not writable
     uint32_t missing_intr = hal->intr_not_triggered & ev;
     if (missing_intr) {
-        if ((missing_intr & SPI_EV_RECV) && spi_ll_get_intr(hal->dev, SPI_LL_INTR_WR_DONE)) {
+        if ((missing_intr & SPI_EV_RECV) && spi_ll_get_intr(hal->dev, SPI_LL_INTR_CMD7)) {
             hal->intr_not_triggered &= ~SPI_EV_RECV;
         }
         if ((missing_intr & SPI_EV_SEND) && spi_ll_get_intr(hal->dev, SPI_LL_INTR_CMD8)) {
@@ -182,13 +213,13 @@ bool spi_slave_hd_hal_check_disable_event(spi_slave_hd_hal_context_t* hal, spi_e
 
 void spi_slave_hd_hal_enable_event_intr(spi_slave_hd_hal_context_t* hal, spi_event_t ev)
 {
-    spi_ll_intr_t intr = get_event_intr(ev);
+    spi_ll_intr_t intr = get_event_intr(hal, ev);
     spi_ll_enable_intr(hal->dev, intr);
 }
 
 void spi_slave_hd_hal_invoke_event_intr(spi_slave_hd_hal_context_t* hal, spi_event_t ev)
 {
-    spi_ll_intr_t intr = get_event_intr(ev);
+    spi_ll_intr_t intr = get_event_intr(hal, ev);
 
     // Workaround for these interrupts not writable
     if (hal->intr_not_triggered & ev & (SPI_EV_RECV | SPI_EV_SEND)) {
@@ -219,8 +250,155 @@ int spi_slave_hd_hal_get_rxlen(spi_slave_hd_hal_context_t *hal)
     return spi_ll_slave_get_rx_byte_len(hal->dev);
 }
 
-int spi_slave_hd_hal_rxdma_get_len(spi_slave_hd_hal_context_t *hal)
+int spi_slave_hd_hal_rxdma_seg_get_len(spi_slave_hd_hal_context_t *hal)
 {
-    lldesc_t* desc = &hal->dmadesc_rx[0];
+    lldesc_t* desc = &hal->dmadesc_rx->desc;
     return lldesc_get_received_len(desc, NULL);
 }
+
+bool spi_slave_hd_hal_get_tx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans)
+{
+    if (!hal->tx_dma_head || hal->tx_dma_head->desc.owner) {
+        return false;
+    }
+
+    *out_trans = hal->tx_dma_head->arg;
+    hal->tx_recycled_desc_cnt++;
+    hal->tx_dma_head = (spi_slave_hd_hal_desc_append_t *)STAILQ_NEXT(&hal->tx_dma_head->desc, qe);
+
+    return true;
+}
+
+bool spi_slave_hd_hal_get_rx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans, size_t *out_len)
+{
+    if (!hal->rx_dma_head || hal->rx_dma_head->desc.owner) {
+        return false;
+    }
+
+    *out_trans = hal->rx_dma_head->arg;
+    *out_len = hal->rx_dma_head->desc.length;
+    hal->rx_recycled_desc_cnt++;
+    hal->rx_dma_head = (spi_slave_hd_hal_desc_append_t *)STAILQ_NEXT(&hal->rx_dma_head->desc, qe);
+
+    return true;
+}
+
+#if CONFIG_IDF_TARGET_ESP32S2
+//Append mode is only supported on ESP32S2 now
+static void spi_slave_hd_hal_link_append_desc(spi_slave_hd_hal_desc_append_t *dmadesc, const void *data, int len, bool isrx, void *arg)
+{
+    assert(len <= LLDESC_MAX_NUM_PER_DESC);     //TODO: Add support for transaction with length larger than 4092, IDF-2660
+    int n = 0;
+    while (len) {
+        int dmachunklen = len;
+        if (dmachunklen > LLDESC_MAX_NUM_PER_DESC) {
+            dmachunklen = LLDESC_MAX_NUM_PER_DESC;
+        }
+        if (isrx) {
+            //Receive needs DMA length rounded to next 32-bit boundary
+            dmadesc[n].desc.size = (dmachunklen + 3) & (~3);
+            dmadesc[n].desc.length = (dmachunklen + 3) & (~3);
+        } else {
+            dmadesc[n].desc.size = dmachunklen;
+            dmadesc[n].desc.length = dmachunklen;
+        }
+        dmadesc[n].desc.buf = (uint8_t *)data;
+        dmadesc[n].desc.eof = 0;
+        dmadesc[n].desc.sosf = 0;
+        dmadesc[n].desc.owner = 1;
+        dmadesc[n].desc.qe.stqe_next = &dmadesc[n + 1].desc;
+        dmadesc[n].arg = arg;
+        len -= dmachunklen;
+        data += dmachunklen;
+        n++;
+    }
+    dmadesc[n - 1].desc.eof = 1; //Mark last DMA desc as end of stream.
+    dmadesc[n - 1].desc.qe.stqe_next = NULL;
+}
+
+esp_err_t spi_slave_hd_hal_txdma_append(spi_slave_hd_hal_context_t *hal, uint8_t *data, size_t len, void *arg)
+{
+    //Check if there are enough available DMA descriptors for software to use
+    int num_required = (len + LLDESC_MAX_NUM_PER_DESC - 1) / LLDESC_MAX_NUM_PER_DESC;
+    int not_recycled_desc_num = hal->tx_used_desc_cnt - hal->tx_recycled_desc_cnt;
+    int available_desc_num = hal->dma_desc_num - not_recycled_desc_num;
+    if (num_required > available_desc_num) {
+        return ESP_ERR_INVALID_STATE;
+    }
+
+    spi_slave_hd_hal_link_append_desc(hal->tx_cur_desc, data, len, false, arg);
+
+    if (!hal->tx_dma_head) {
+        //start a new link
+        hal->tx_dma_head = hal->tx_cur_desc;
+        hal->tx_dma_tail = hal->tx_cur_desc;
+
+        spi_dma_ll_tx_reset(hal->dma_out);
+        spi_ll_outfifo_empty_clr(hal->dev);
+        spi_ll_clear_intr(hal->dev, SPI_LL_INTR_OUT_EOF);
+
+        spi_ll_dma_tx_enable(hal->dev, 1);
+        spi_dma_ll_tx_start(hal->dma_out, &hal->tx_dma_head->desc);
+    } else {
+        //there is already a link
+        STAILQ_NEXT(&hal->tx_dma_tail->desc, qe) = &hal->tx_cur_desc->desc;
+        hal->tx_dma_tail = hal->tx_cur_desc;
+
+        spi_dma_ll_tx_restart(hal->dma_out);
+    }
+
+    //Move the current descriptor pointer according to the number of the linked descriptors
+    for (int i = 0; i < num_required; i++) {
+        hal->tx_used_desc_cnt++;
+        hal->tx_cur_desc++;
+        if (hal->tx_cur_desc == hal->dmadesc_tx + hal->dma_desc_num) {
+            hal->tx_cur_desc = hal->dmadesc_tx;
+        }
+    }
+
+    return ESP_OK;
+}
+
+esp_err_t spi_slave_hd_hal_rxdma_append(spi_slave_hd_hal_context_t *hal, uint8_t *data, size_t len, void *arg)
+{
+    //Check if there are enough available dma descriptors for software to use
+    int num_required = (len + LLDESC_MAX_NUM_PER_DESC - 1) / LLDESC_MAX_NUM_PER_DESC;
+    int not_recycled_desc_num = hal->rx_used_desc_cnt - hal->rx_recycled_desc_cnt;
+    int available_desc_num = hal->dma_desc_num - not_recycled_desc_num;
+    if (num_required > available_desc_num) {
+        return ESP_ERR_INVALID_STATE;
+    }
+
+    spi_slave_hd_hal_link_append_desc(hal->rx_cur_desc, data, len, false, arg);
+
+    if (!hal->rx_dma_head) {
+        //start a new link
+        hal->rx_dma_head = hal->rx_cur_desc;
+        hal->rx_dma_tail = hal->rx_cur_desc;
+
+        spi_dma_ll_rx_reset(hal->dma_in);
+        spi_ll_infifo_full_clr(hal->dev);
+        spi_ll_clear_intr(hal->dev, SPI_LL_INTR_CMD7);
+
+        spi_ll_dma_rx_enable(hal->dev, 1);
+        spi_dma_ll_rx_start(hal->dma_in, &hal->rx_dma_head->desc);
+    } else {
+        //there is already a link
+        STAILQ_NEXT(&hal->rx_dma_tail->desc, qe) = &hal->rx_cur_desc->desc;
+        hal->rx_dma_tail = hal->rx_cur_desc;
+
+        spi_dma_ll_rx_restart(hal->dma_in);
+    }
+
+    //Move the current descriptor pointer according to the number of the linked descriptors
+    for (int i = 0; i < num_required; i++) {
+        hal->rx_used_desc_cnt++;
+        hal->rx_cur_desc++;
+        if (hal->rx_cur_desc == hal->dmadesc_rx + hal->dma_desc_num) {
+            hal->rx_cur_desc = hal->dmadesc_rx;
+        }
+    }
+
+    return ESP_OK;
+}
+#endif  //#if CONFIG_IDF_TARGET_ESP32S2
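
To spell out the descriptor bookkeeping both ``*_append()`` functions rely on, here is a small sketch (not part of the patch). The helper name is an assumption, and LLDESC_MAX_NUM_PER_DESC is taken to be 4092 bytes per descriptor.

#include <stdbool.h>
#include "hal/spi_slave_hd_hal.h"   // assumed to pull in the HAL context and lldesc definitions

// Mirror of the capacity check used by spi_slave_hd_hal_txdma_append() above.
static bool example_can_append_tx(const spi_slave_hd_hal_context_t *hal, size_t len)
{
    // Descriptors needed for this buffer, at most 4092 data bytes per descriptor.
    int num_required = (len + LLDESC_MAX_NUM_PER_DESC - 1) / LLDESC_MAX_NUM_PER_DESC;
    // Descriptors still held by in-flight transactions.
    int not_recycled = hal->tx_used_desc_cnt - hal->tx_recycled_desc_cnt;
    int available = hal->dma_desc_num - not_recycled;
    // Example numbers: a 10000-byte bus size gives dma_desc_num = ceil(10000 / 4092) = 3,
    // so a 5000-byte append (2 descriptors) fits only while at most 1 descriptor is in flight.
    return num_required <= available;
}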