Răsfoiți Sursa

Merge branch 'feature/esp_gdma_ops' into 'master'

feat(esp_gdma): add hal interface for common operations

Closes IDF-7809

See merge request espressif/esp-idf!24433
morris 2 ani în urmă
părinte
comite
682e5ae450
62 a modificat fișierele cu 2839 adăugiri și 798 ștergeri
  1. 5 17
      components/driver/test_apps/parlio/main/test_app_main.c
  2. 1 1
      components/driver/test_apps/spi/param/main/test_app_main.c
  3. 1 1
      components/driver/test_apps/spi/slave/main/test_app_main.c
  4. 1 1
      components/esp_hw_support/CMakeLists.txt
  5. 0 2
      components/esp_hw_support/dma/async_memcpy_impl_gdma.c
  6. 227 231
      components/esp_hw_support/dma/gdma.c
  7. 6 3
      components/esp_hw_support/dma/gdma_priv.h
  8. 69 0
      components/esp_hw_support/dma/linker.lf
  9. 25 5
      components/esp_hw_support/include/esp_private/gdma.h
  10. 0 5
      components/esp_hw_support/linker.lf
  11. 2 2
      components/esp_hw_support/test_apps/dma/main/test_async_memcpy.c
  12. 10 9
      components/esp_hw_support/test_apps/dma/main/test_gdma.c
  13. 13 1
      components/hal/CMakeLists.txt
  14. 1 1
      components/hal/adc_hal.c
  15. 8 3
      components/hal/esp32c2/include/hal/gdma_ll.h
  16. 8 3
      components/hal/esp32c3/include/hal/gdma_ll.h
  17. 14 10
      components/hal/esp32c6/include/hal/gdma_ll.h
  18. 14 10
      components/hal/esp32h2/include/hal/gdma_ll.h
  19. 505 0
      components/hal/esp32p4/include/ahb_dma_ll.h
  20. 453 0
      components/hal/esp32p4/include/axi_dma_ll.h
  21. 43 0
      components/hal/esp32p4/include/gdma_ll.h
  22. 52 15
      components/hal/esp32s3/include/hal/gdma_ll.h
  23. 0 13
      components/hal/gdma_hal.c
  24. 181 0
      components/hal/gdma_hal_ahb_v1.c
  25. 167 0
      components/hal/gdma_hal_ahb_v2.c
  26. 167 0
      components/hal/gdma_hal_axi.c
  27. 91 0
      components/hal/gdma_hal_top.c
  28. 99 15
      components/hal/include/hal/gdma_hal.h
  29. 49 0
      components/hal/include/hal/gdma_hal_ahb.h
  30. 49 0
      components/hal/include/hal/gdma_hal_axi.h
  31. 1 2
      components/hal/include/hal/gdma_types.h
  32. 1 1
      components/hal/spi_hal.c
  33. 1 1
      components/hal/spi_hal_iram.c
  34. 1 1
      components/hal/spi_slave_hal.c
  35. 1 1
      components/hal/spi_slave_hal_iram.c
  36. 1 1
      components/hal/spi_slave_hd_hal.c
  37. 5 14
      components/mbedtls/port/aes/dma/esp_aes_gdma_impl.c
  38. 1 0
      components/mbedtls/port/crypto_shared_gdma/esp_crypto_shared_gdma.c
  39. 1 1
      components/mbedtls/test_apps/main/test_mbedtls_sha.c
  40. 1 1
      components/mbedtls/test_apps/main/test_sha_perf.c
  41. 9 5
      components/soc/esp32c2/include/soc/Kconfig.soc_caps.in
  42. 11 1
      components/soc/esp32c2/include/soc/gdma_channel.h
  43. 5 5
      components/soc/esp32c2/include/soc/soc_caps.h
  44. 10 6
      components/soc/esp32c3/include/soc/Kconfig.soc_caps.in
  45. 13 1
      components/soc/esp32c3/include/soc/gdma_channel.h
  46. 6 5
      components/soc/esp32c3/include/soc/soc_caps.h
  47. 10 2
      components/soc/esp32c6/include/soc/Kconfig.soc_caps.in
  48. 13 0
      components/soc/esp32c6/include/soc/gdma_channel.h
  49. 5 3
      components/soc/esp32c6/include/soc/soc_caps.h
  50. 10 2
      components/soc/esp32h2/include/soc/Kconfig.soc_caps.in
  51. 13 0
      components/soc/esp32h2/include/soc/gdma_channel.h
  52. 5 3
      components/soc/esp32h2/include/soc/soc_caps.h
  53. 8 4
      components/soc/esp32p4/include/soc/Kconfig.soc_caps.in
  54. 171 175
      components/soc/esp32p4/include/soc/ahb_dma_struct.h
  55. 190 191
      components/soc/esp32p4/include/soc/axi_dma_struct.h
  56. 38 0
      components/soc/esp32p4/include/soc/gdma_channel.h
  57. 6 4
      components/soc/esp32p4/include/soc/soc_caps.h
  58. 13 5
      components/soc/esp32s3/include/soc/Kconfig.soc_caps.in
  59. 18 1
      components/soc/esp32s3/include/soc/gdma_channel.h
  60. 7 10
      components/soc/esp32s3/include/soc/soc_caps.h
  61. 3 3
      components/soc/include/soc/gdma_periph.h
  62. 0 1
      tools/ci/check_copyright_ignore.txt

+ 5 - 17
components/driver/test_apps/parlio/main/test_app_main.c

@@ -6,33 +6,21 @@
 
 #include "unity.h"
 #include "unity_test_runner.h"
+#include "unity_test_utils.h"
 #include "esp_heap_caps.h"
 
 // Some resources are lazy allocated in pulse_cnt driver, the threshold is left for that case
-#define TEST_MEMORY_LEAK_THRESHOLD (-300)
-
-static size_t before_free_8bit;
-static size_t before_free_32bit;
-
-static void check_leak(size_t before_free, size_t after_free, const char *type)
-{
-    ssize_t delta = after_free - before_free;
-    printf("MALLOC_CAP_%s: Before %u bytes free, After %u bytes free (delta %d)\n", type, before_free, after_free, delta);
-    TEST_ASSERT_MESSAGE(delta >= TEST_MEMORY_LEAK_THRESHOLD, "memory leak");
-}
+#define TEST_MEMORY_LEAK_THRESHOLD (400)
 
 void setUp(void)
 {
-    before_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
-    before_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
+    unity_utils_record_free_mem();
 }
 
 void tearDown(void)
 {
-    size_t after_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
-    size_t after_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
-    check_leak(before_free_8bit, after_free_8bit, "8BIT");
-    check_leak(before_free_32bit, after_free_32bit, "32BIT");
+    esp_reent_cleanup();    //clean up some of the newlib's lazy allocations
+    unity_utils_evaluate_leaks_direct(TEST_MEMORY_LEAK_THRESHOLD);
 }
 
 void app_main(void)

+ 1 - 1
components/driver/test_apps/spi/param/main/test_app_main.c

@@ -8,7 +8,7 @@
 #include "unity_test_utils.h"
 #include "esp_heap_caps.h"
 
-#define TEST_MEMORY_LEAK_THRESHOLD (150)
+#define TEST_MEMORY_LEAK_THRESHOLD (200)
 
 static size_t before_free_8bit;
 static size_t before_free_32bit;

+ 1 - 1
components/driver/test_apps/spi/slave/main/test_app_main.c

@@ -8,7 +8,7 @@
 #include "unity_test_utils.h"
 #include "esp_heap_caps.h"
 
-#define TEST_MEMORY_LEAK_THRESHOLD (120)
+#define TEST_MEMORY_LEAK_THRESHOLD (200)
 
 static size_t before_free_8bit;
 static size_t before_free_32bit;

+ 1 - 1
components/esp_hw_support/CMakeLists.txt

@@ -137,7 +137,7 @@ idf_component_register(SRCS ${srcs}
                        PRIV_INCLUDE_DIRS port/include include/esp_private
                        REQUIRES ${requires}
                        PRIV_REQUIRES "${priv_requires}"
-                       LDFRAGMENTS linker.lf)
+                       LDFRAGMENTS linker.lf dma/linker.lf)
 
 idf_build_get_property(target IDF_TARGET)
 add_subdirectory(port/${target})

+ 0 - 2
components/esp_hw_support/dma/async_memcpy_impl_gdma.c

@@ -8,8 +8,6 @@
 #include "soc/periph_defs.h"
 #include "soc/soc_memory_layout.h"
 #include "soc/soc_caps.h"
-#include "hal/gdma_ll.h"
-#include "hal/gdma_hal.h"
 #include "esp_private/periph_ctrl.h"
 #include "esp_log.h"
 #include "esp_attr.h"

+ 227 - 231
components/esp_hw_support/dma/gdma.c

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -40,12 +40,12 @@ static const char *TAG = "gdma";
  */
 
 typedef struct gdma_platform_t {
-    portMUX_TYPE spinlock;                 // platform level spinlock
-    gdma_group_t *groups[SOC_GDMA_GROUPS]; // array of GDMA group instances
-    int group_ref_counts[SOC_GDMA_GROUPS]; // reference count used to protect group install/uninstall
+    portMUX_TYPE spinlock;                         // platform level spinlock
+    gdma_group_t *groups[SOC_GDMA_NUM_GROUPS_MAX]; // array of GDMA group instances
+    int group_ref_counts[SOC_GDMA_NUM_GROUPS_MAX]; // reference count used to protect group install/uninstall
 } gdma_platform_t;
 
-static gdma_group_t *gdma_acquire_group_handle(int group_id);
+static gdma_group_t *gdma_acquire_group_handle(int group_id, void (*hal_init)(gdma_hal_context_t *hal, const gdma_hal_config_t *config));
 static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id);
 static void gdma_release_group_handle(gdma_group_t *group);
 static void gdma_release_pair_handle(gdma_pair_t *pair);
@@ -57,10 +57,17 @@ static esp_err_t gdma_install_tx_interrupt(gdma_tx_channel_t *tx_chan);
 // gdma driver platform
 static gdma_platform_t s_platform = {
     .spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED,
-    .groups = {} // groups will be lazy installed
 };
 
-esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
+typedef struct {
+    int bus_id;
+    int start_group_id;
+    int end_group_id;
+    int pairs_per_group;
+    void (*hal_init)(gdma_hal_context_t *hal, const gdma_hal_config_t *config);
+} gdma_channel_search_info_t;
+
+static esp_err_t do_allocate_gdma_channel(const gdma_channel_search_info_t *search_info, const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
 {
     esp_err_t ret = ESP_OK;
     gdma_tx_channel_t *alloc_tx_channel = NULL;
@@ -68,7 +75,7 @@ esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_chann
     int search_code = 0;
     gdma_pair_t *pair = NULL;
     gdma_group_t *group = NULL;
-    ESP_GOTO_ON_FALSE(config && ret_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
+    ESP_RETURN_ON_FALSE(config && ret_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
 
     if (config->flags.reserve_sibling) {
         search_code = SEARCH_REQUEST_RX_CHANNEL | SEARCH_REQUEST_TX_CHANNEL; // search for a pair of channels
@@ -94,10 +101,15 @@ esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_chann
         goto search_done; // skip the search path below if user has specify a sibling channel
     }
 
-    for (int i = 0; i < SOC_GDMA_GROUPS && search_code; i++) { // loop to search group
-        group = gdma_acquire_group_handle(i);
+    int start_group_id = search_info->start_group_id;
+    int end_group_id = search_info->end_group_id;
+    int pairs_per_group = search_info->pairs_per_group;
+
+    for (int i = start_group_id; i < end_group_id && search_code; i++) { // loop to search group
+        group = gdma_acquire_group_handle(i, search_info->hal_init);
+        group->bus_id = search_info->bus_id;
         ESP_GOTO_ON_FALSE(group, ESP_ERR_NO_MEM, err, TAG, "no mem for group(%d)", i);
-        for (int j = 0; j < SOC_GDMA_PAIRS_PER_GROUP && search_code; j++) { // loop to search pair
+        for (int j = 0; j < pairs_per_group && search_code; j++) { // loop to search pair
             pair = gdma_acquire_pair_handle(group, j);
             ESP_GOTO_ON_FALSE(pair, ESP_ERR_NO_MEM, err, TAG, "no mem for pair(%d,%d)", i, j);
             portENTER_CRITICAL(&pair->spinlock);
@@ -160,15 +172,49 @@ err:
     return ret;
 }
 
+#if SOC_AHB_GDMA_SUPPORTED
+esp_err_t gdma_new_ahb_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
+{
+    gdma_channel_search_info_t search_info = {
+        .bus_id = SOC_GDMA_BUS_AHB,
+        .start_group_id = GDMA_LL_AHB_GROUP_START_ID,
+        .end_group_id = GDMA_LL_AHB_GROUP_START_ID + GDMA_LL_AHB_NUM_GROUPS,
+        .pairs_per_group = GDMA_LL_AHB_PAIRS_PER_GROUP,
+        .hal_init = gdma_ahb_hal_init,
+    };
+    return do_allocate_gdma_channel(&search_info, config, ret_chan);
+}
+
+esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
+__attribute__((alias("gdma_new_ahb_channel")));
+#endif // SOC_AHB_GDMA_SUPPORTED
+
+#if SOC_AXI_GDMA_SUPPORTED
+esp_err_t gdma_new_axi_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
+{
+    gdma_channel_search_info_t search_info = {
+        .bus_id = SOC_GDMA_BUS_AXI,
+        .start_group_id = GDMA_LL_AXI_GROUP_START_ID,
+        .end_group_id = GDMA_LL_AXI_GROUP_START_ID + GDMA_LL_AXI_NUM_GROUPS,
+        .pairs_per_group = GDMA_LL_AXI_PAIRS_PER_GROUP,
+        .hal_init = gdma_axi_hal_init,
+    };
+    return do_allocate_gdma_channel(&search_info, config, ret_chan);
+}
+#endif // SOC_AXI_GDMA_SUPPORTED
+
 esp_err_t gdma_del_channel(gdma_channel_handle_t dma_chan)
 {
-    esp_err_t ret = ESP_OK;
-    ESP_GOTO_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
+    ESP_RETURN_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
+    gdma_pair_t *pair = dma_chan->pair;
+    gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
 
-    ret = dma_chan->del(dma_chan); // call `gdma_del_tx_channel` or `gdma_del_rx_channel`
+    // reset the channel priority to default
+    gdma_hal_set_priority(hal, pair->pair_id, dma_chan->direction, 0);
 
-err:
-    return ret;
+    // call `gdma_del_tx_channel` or `gdma_del_rx_channel` under the hood
+    return dma_chan->del(dma_chan);
 }
 
 esp_err_t gdma_get_channel_id(gdma_channel_handle_t dma_chan, int *channel_id)
@@ -184,13 +230,17 @@ err:
 
 esp_err_t gdma_connect(gdma_channel_handle_t dma_chan, gdma_trigger_t trig_periph)
 {
-    gdma_pair_t *pair = NULL;
-    gdma_group_t *group = NULL;
     ESP_RETURN_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
     ESP_RETURN_ON_FALSE(dma_chan->periph_id == GDMA_INVALID_PERIPH_TRIG, ESP_ERR_INVALID_STATE, TAG, "channel is using by peripheral: %d", dma_chan->periph_id);
-    pair = dma_chan->pair;
-    group = pair->group;
+    gdma_pair_t *pair = dma_chan->pair;
+    gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
     bool periph_conflict = false;
+    //
+    if (trig_periph.bus_id != SOC_GDMA_BUS_ANY) {
+        ESP_RETURN_ON_FALSE(trig_periph.bus_id == group->bus_id, ESP_ERR_INVALID_ARG, TAG,
+                            "peripheral and DMA system bus mismatch");
+    }
 
     if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
         if (trig_periph.instance_id >= 0) {
@@ -202,10 +252,6 @@ esp_err_t gdma_connect(gdma_channel_handle_t dma_chan, gdma_trigger_t trig_perip
             }
             portEXIT_CRITICAL(&group->spinlock);
         }
-        if (!periph_conflict) {
-            gdma_ll_tx_reset_channel(group->hal.dev, pair->pair_id); // reset channel
-            gdma_ll_tx_connect_to_periph(group->hal.dev, pair->pair_id, trig_periph.periph, trig_periph.instance_id);
-        }
     } else {
         if (trig_periph.instance_id >= 0) {
             portENTER_CRITICAL(&group->spinlock);
@@ -216,26 +262,22 @@ esp_err_t gdma_connect(gdma_channel_handle_t dma_chan, gdma_trigger_t trig_perip
             }
             portEXIT_CRITICAL(&group->spinlock);
         }
-        if (!periph_conflict) {
-            gdma_ll_rx_reset_channel(group->hal.dev, pair->pair_id); // reset channel
-            gdma_ll_rx_connect_to_periph(group->hal.dev, pair->pair_id, trig_periph.periph, trig_periph.instance_id);
-        }
     }
 
     ESP_RETURN_ON_FALSE(!periph_conflict, ESP_ERR_INVALID_STATE, TAG, "peripheral %d is already used by another channel", trig_periph.instance_id);
+    gdma_hal_connect_peri(hal, pair->pair_id, dma_chan->direction, trig_periph.periph, trig_periph.instance_id);
     dma_chan->periph_id = trig_periph.instance_id;
     return ESP_OK;
 }
 
 esp_err_t gdma_disconnect(gdma_channel_handle_t dma_chan)
 {
-    gdma_pair_t *pair = NULL;
-    gdma_group_t *group = NULL;
     ESP_RETURN_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
     ESP_RETURN_ON_FALSE(dma_chan->periph_id != GDMA_INVALID_PERIPH_TRIG, ESP_ERR_INVALID_STATE, TAG, "no peripheral is connected to the channel");
 
-    pair = dma_chan->pair;
-    group = pair->group;
+    gdma_pair_t *pair = dma_chan->pair;
+    gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
     int save_periph_id = dma_chan->periph_id;
 
     if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
@@ -244,29 +286,26 @@ esp_err_t gdma_disconnect(gdma_channel_handle_t dma_chan)
             group->tx_periph_in_use_mask &= ~(1 << save_periph_id);
             portEXIT_CRITICAL(&group->spinlock);
         }
-        gdma_ll_tx_disconnect_from_periph(group->hal.dev, pair->pair_id);
     } else {
         if (save_periph_id >= 0) {
             portENTER_CRITICAL(&group->spinlock);
             group->rx_periph_in_use_mask &= ~(1 << save_periph_id);
             portEXIT_CRITICAL(&group->spinlock);
         }
-        gdma_ll_rx_disconnect_from_periph(group->hal.dev, pair->pair_id);
     }
 
+    gdma_hal_disconnect_peri(hal, pair->pair_id, dma_chan->direction);
+
     dma_chan->periph_id = GDMA_INVALID_PERIPH_TRIG;
     return ESP_OK;
 }
 
 esp_err_t gdma_get_free_m2m_trig_id_mask(gdma_channel_handle_t dma_chan, uint32_t *mask)
 {
-    gdma_pair_t *pair = NULL;
-    gdma_group_t *group = NULL;
     ESP_RETURN_ON_FALSE(dma_chan && mask, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
-
-    uint32_t free_mask = GDMA_LL_M2M_FREE_PERIPH_ID_MASK;
-    pair = dma_chan->pair;
-    group = pair->group;
+    gdma_pair_t *pair = dma_chan->pair;
+    gdma_group_t *group = pair->group;
+    uint32_t free_mask = group->hal.priv_data->m2m_free_periph_mask;
 
     portENTER_CRITICAL(&group->spinlock);
     free_mask &= ~(group->tx_periph_in_use_mask);
@@ -279,206 +318,166 @@ esp_err_t gdma_get_free_m2m_trig_id_mask(gdma_channel_handle_t dma_chan, uint32_
 
 esp_err_t gdma_set_transfer_ability(gdma_channel_handle_t dma_chan, const gdma_transfer_ability_t *ability)
 {
-    esp_err_t ret = ESP_OK;
-    gdma_pair_t *pair = NULL;
-    gdma_group_t *group = NULL;
-    bool en_burst = true;
-    ESP_GOTO_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
-    pair = dma_chan->pair;
-    group = pair->group;
+    ESP_RETURN_ON_FALSE(dma_chan && ability, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
+    gdma_pair_t *pair = dma_chan->pair;
+    gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
+
     size_t sram_alignment = ability->sram_trans_align;
     size_t psram_alignment = ability->psram_trans_align;
     // alignment should be 2^n
-    ESP_GOTO_ON_FALSE((sram_alignment & (sram_alignment - 1)) == 0, ESP_ERR_INVALID_ARG, err, TAG, "invalid sram alignment: %zu", sram_alignment);
+    ESP_RETURN_ON_FALSE((sram_alignment & (sram_alignment - 1)) == 0, ESP_ERR_INVALID_ARG,
+                        TAG, "invalid sram alignment: %zu", sram_alignment);
 
-#if SOC_GDMA_SUPPORT_PSRAM
     uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_TYPE_DATA);
-    int block_size_index = 0;
-    switch (psram_alignment) {
-    case 64: // 64 Bytes alignment
-        block_size_index = GDMA_LL_EXT_MEM_BK_SIZE_64B;
-        break;
-    case 32: // 32 Bytes alignment
-        block_size_index = GDMA_LL_EXT_MEM_BK_SIZE_32B;
-        break;
-    case 16: // 16 Bytes alignment
-        block_size_index = GDMA_LL_EXT_MEM_BK_SIZE_16B;
-        break;
-    case 0: // no alignment is requirement
-        block_size_index = GDMA_LL_EXT_MEM_BK_SIZE_16B;
-        psram_alignment = data_cache_line_size; // fall back to use the same size of the psram data cache line size
-        break;
-    default:
-        ESP_GOTO_ON_FALSE(false, ESP_ERR_INVALID_ARG, err, TAG, "invalid psram alignment: %zu", psram_alignment);
-        break;
-    }
-    ESP_GOTO_ON_FALSE(((psram_alignment % data_cache_line_size) == 0), ESP_ERR_INVALID_ARG, err, TAG, "psram alignment (%d)B should be multiple of the data cache line size (%d)B", psram_alignment, data_cache_line_size);
-#endif // #if SOC_GDMA_SUPPORT_PSRAM
+    if (psram_alignment == 0) {
+        // fall back to use the same size of the psram data cache line size
+        psram_alignment = data_cache_line_size;
+    }
+    if (psram_alignment > data_cache_line_size) {
+        ESP_RETURN_ON_FALSE(((psram_alignment % data_cache_line_size) == 0), ESP_ERR_INVALID_ARG,
+                            TAG, "psram_alignment(%d) should be multiple of the data_cache_line_size(%d)",
+                            psram_alignment, data_cache_line_size);
+    }
 
-    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
-        // TX channel can always enable burst mode, no matter data alignment
-        gdma_ll_tx_enable_data_burst(group->hal.dev, pair->pair_id, true);
-        gdma_ll_tx_enable_descriptor_burst(group->hal.dev, pair->pair_id, true);
-#if SOC_GDMA_SUPPORT_PSRAM
-        gdma_ll_tx_set_block_size_psram(group->hal.dev, pair->pair_id, block_size_index);
-#endif // #if SOC_GDMA_SUPPORT_PSRAM
-    } else {
+    // if the DMA can't access the PSRAM, this HAL function is no-op
+    gdma_hal_set_ext_mem_align(hal, pair->pair_id, dma_chan->direction, psram_alignment);
+
+    // TX channel can always enable burst mode, no matter data alignment
+    bool en_burst = true;
+    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
         // RX channel burst mode depends on specific data alignment
         en_burst = sram_alignment >= 4;
-        gdma_ll_rx_enable_data_burst(group->hal.dev, pair->pair_id, en_burst);
-        gdma_ll_rx_enable_descriptor_burst(group->hal.dev, pair->pair_id, en_burst);
-#if SOC_GDMA_SUPPORT_PSRAM
-        gdma_ll_rx_set_block_size_psram(group->hal.dev, pair->pair_id, block_size_index);
-#endif // #if SOC_GDMA_SUPPORT_PSRAM
     }
+    gdma_hal_enable_burst(hal, pair->pair_id, dma_chan->direction, en_burst, en_burst);
 
     dma_chan->sram_alignment = sram_alignment;
     dma_chan->psram_alignment = psram_alignment;
     ESP_LOGD(TAG, "%s channel (%d,%d), (%u:%u) bytes aligned, burst %s", dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX ? "tx" : "rx",
              group->group_id, pair->pair_id, sram_alignment, psram_alignment, en_burst ? "enabled" : "disabled");
-err:
-    return ret;
+
+    return ESP_OK;
 }
 
 esp_err_t gdma_apply_strategy(gdma_channel_handle_t dma_chan, const gdma_strategy_config_t *config)
 {
-    esp_err_t ret = ESP_OK;
-    gdma_pair_t *pair = NULL;
-    gdma_group_t *group = NULL;
-    ESP_GOTO_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
-    pair = dma_chan->pair;
-    group = pair->group;
+    ESP_RETURN_ON_FALSE(dma_chan && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
+    gdma_pair_t *pair = dma_chan->pair;
+    gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
 
-    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
-        gdma_ll_tx_enable_owner_check(group->hal.dev, pair->pair_id, config->owner_check);
-        gdma_ll_tx_enable_auto_write_back(group->hal.dev, pair->pair_id, config->auto_update_desc);
-    } else {
-        gdma_ll_rx_enable_owner_check(group->hal.dev, pair->pair_id, config->owner_check);
-    }
+    gdma_hal_set_strategy(hal, pair->pair_id, dma_chan->direction, config->owner_check, config->auto_update_desc);
 
-err:
-    return ret;
+    return ESP_OK;
 }
 
 esp_err_t gdma_set_priority(gdma_channel_handle_t dma_chan, uint32_t priority)
 {
-    gdma_pair_t *pair = NULL;
-    gdma_group_t *group = NULL;
     ESP_RETURN_ON_FALSE(dma_chan && priority <= GDMA_LL_CHANNEL_MAX_PRIORITY, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
-    pair = dma_chan->pair;
-    group = pair->group;
+    gdma_pair_t *pair = dma_chan->pair;
+    gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
 
-    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
-        gdma_ll_tx_set_priority(group->hal.dev, pair->pair_id, priority);
-    } else {
-        gdma_ll_rx_set_priority(group->hal.dev, pair->pair_id, priority);
-    }
+    gdma_hal_set_priority(hal, pair->pair_id, dma_chan->direction, priority);
 
     return ESP_OK;
-
 }
 
 esp_err_t gdma_register_tx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_tx_event_callbacks_t *cbs, void *user_data)
 {
-    esp_err_t ret = ESP_OK;
-    gdma_pair_t *pair = NULL;
-    gdma_group_t *group = NULL;
-    ESP_GOTO_ON_FALSE(dma_chan && cbs && dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
-    pair = dma_chan->pair;
-    group = pair->group;
+    ESP_RETURN_ON_FALSE(dma_chan && cbs && dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
+    gdma_pair_t *pair = dma_chan->pair;
+    gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
     gdma_tx_channel_t *tx_chan = __containerof(dma_chan, gdma_tx_channel_t, base);
 
 #if CONFIG_GDMA_ISR_IRAM_SAFE
     if (cbs->on_trans_eof) {
-        ESP_GOTO_ON_FALSE(esp_ptr_in_iram(cbs->on_trans_eof), ESP_ERR_INVALID_ARG, err, TAG, "on_trans_eof not in IRAM");
+        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_trans_eof), ESP_ERR_INVALID_ARG,
+                            TAG, "on_trans_eof not in IRAM");
     }
     if (cbs->on_descr_err) {
-        ESP_GOTO_ON_FALSE(esp_ptr_in_iram(cbs->on_descr_err), ESP_ERR_INVALID_ARG, err, TAG, "on_descr_err not in IRAM");
+        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_descr_err), ESP_ERR_INVALID_ARG,
+                            TAG, "on_descr_err not in IRAM");
     }
     if (user_data) {
-        ESP_GOTO_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG, err, TAG, "user context not in internal RAM");
+        ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG,
+                            TAG, "user context not in internal RAM");
     }
 #endif // CONFIG_GDMA_ISR_IRAM_SAFE
 
     // lazy install interrupt service
-    ESP_GOTO_ON_ERROR(gdma_install_tx_interrupt(tx_chan), err, TAG, "install interrupt service failed");
+    ESP_RETURN_ON_ERROR(gdma_install_tx_interrupt(tx_chan), TAG, "install interrupt service failed");
 
     // enable/disable GDMA interrupt events for TX channel
     portENTER_CRITICAL(&pair->spinlock);
-    gdma_ll_tx_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_TX_EOF, cbs->on_trans_eof != NULL);
-    gdma_ll_tx_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_TX_DESC_ERROR, cbs->on_descr_err != NULL);
+    gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_TX, GDMA_LL_EVENT_TX_EOF, cbs->on_trans_eof != NULL);
+    gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_TX, GDMA_LL_EVENT_TX_DESC_ERROR, cbs->on_descr_err != NULL);
     portEXIT_CRITICAL(&pair->spinlock);
 
     memcpy(&tx_chan->cbs, cbs, sizeof(gdma_tx_event_callbacks_t));
     tx_chan->user_data = user_data;
 
-    ESP_GOTO_ON_ERROR(esp_intr_enable(dma_chan->intr), err, TAG, "enable interrupt failed");
+    ESP_RETURN_ON_ERROR(esp_intr_enable(dma_chan->intr), TAG, "enable interrupt failed");
 
-err:
-    return ret;
+    return ESP_OK;
 }
 
 esp_err_t gdma_register_rx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_rx_event_callbacks_t *cbs, void *user_data)
 {
-    esp_err_t ret = ESP_OK;
-    gdma_pair_t *pair = NULL;
-    gdma_group_t *group = NULL;
-    ESP_GOTO_ON_FALSE(dma_chan && cbs && dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
-    pair = dma_chan->pair;
-    group = pair->group;
+    ESP_RETURN_ON_FALSE(dma_chan && cbs && dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
+    gdma_pair_t *pair = dma_chan->pair;
+    gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
     gdma_rx_channel_t *rx_chan = __containerof(dma_chan, gdma_rx_channel_t, base);
 
 #if CONFIG_GDMA_ISR_IRAM_SAFE
     if (cbs->on_recv_eof) {
-        ESP_GOTO_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_eof), ESP_ERR_INVALID_ARG, err, TAG, "on_recv_eof not in IRAM");
+        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_eof), ESP_ERR_INVALID_ARG,
+                            TAG, "on_recv_eof not in IRAM");
     }
     if (cbs->on_descr_err) {
-        ESP_GOTO_ON_FALSE(esp_ptr_in_iram(cbs->on_descr_err), ESP_ERR_INVALID_ARG, err, TAG, "on_descr_err not in IRAM");
+        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_descr_err), ESP_ERR_INVALID_ARG,
+                            TAG, "on_descr_err not in IRAM");
     }
     if (cbs->on_recv_done) {
-        ESP_GOTO_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_done), ESP_ERR_INVALID_ARG, err, TAG, "on_recv_done not in IRAM");
+        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_done), ESP_ERR_INVALID_ARG,
+                            TAG, "on_recv_done not in IRAM");
     }
     if (user_data) {
-        ESP_GOTO_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG, err, TAG, "user context not in internal RAM");
+        ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG,
+                            TAG, "user context not in internal RAM");
     }
 #endif // CONFIG_GDMA_ISR_IRAM_SAFE
 
     // lazy install interrupt service
-    ESP_GOTO_ON_ERROR(gdma_install_rx_interrupt(rx_chan), err, TAG, "install interrupt service failed");
+    ESP_RETURN_ON_ERROR(gdma_install_rx_interrupt(rx_chan), TAG, "install interrupt service failed");
 
     // enable/disable GDMA interrupt events for RX channel
     portENTER_CRITICAL(&pair->spinlock);
-    gdma_ll_rx_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_RX_SUC_EOF, cbs->on_recv_eof != NULL);
-    gdma_ll_rx_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_RX_DESC_ERROR, cbs->on_descr_err != NULL);
-    gdma_ll_rx_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_RX_DONE, cbs->on_recv_done != NULL);
+    gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, GDMA_LL_EVENT_RX_SUC_EOF, cbs->on_recv_eof != NULL);
+    gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, GDMA_LL_EVENT_RX_DESC_ERROR, cbs->on_descr_err != NULL);
+    gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, GDMA_LL_EVENT_RX_DONE, cbs->on_recv_done != NULL);
     portEXIT_CRITICAL(&pair->spinlock);
 
     memcpy(&rx_chan->cbs, cbs, sizeof(gdma_rx_event_callbacks_t));
     rx_chan->user_data = user_data;
 
-    ESP_GOTO_ON_ERROR(esp_intr_enable(dma_chan->intr), err, TAG, "enable interrupt failed");
+    ESP_RETURN_ON_ERROR(esp_intr_enable(dma_chan->intr), TAG, "enable interrupt failed");
 
-err:
-    return ret;
+    return ESP_OK;
 }
 
 esp_err_t gdma_start(gdma_channel_handle_t dma_chan, intptr_t desc_base_addr)
 {
-    gdma_pair_t *pair = NULL;
-    gdma_group_t *group = NULL;
     ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
     ESP_RETURN_ON_FALSE_ISR(dma_chan->flags.start_stop_by_etm == false, ESP_ERR_INVALID_STATE, TAG, "channel is controlled by ETM");
-    pair = dma_chan->pair;
-    group = pair->group;
+    gdma_pair_t *pair = dma_chan->pair;
+    gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
 
     portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
-    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
-        gdma_ll_rx_set_desc_addr(group->hal.dev, pair->pair_id, desc_base_addr);
-        gdma_ll_rx_start(group->hal.dev, pair->pair_id);
-    } else {
-        gdma_ll_tx_set_desc_addr(group->hal.dev, pair->pair_id, desc_base_addr);
-        gdma_ll_tx_start(group->hal.dev, pair->pair_id);
-    }
+    gdma_hal_start_with_desc(hal, pair->pair_id, dma_chan->direction, desc_base_addr);
     portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);
 
     return ESP_OK;
@@ -486,19 +485,14 @@ esp_err_t gdma_start(gdma_channel_handle_t dma_chan, intptr_t desc_base_addr)
 
 esp_err_t gdma_stop(gdma_channel_handle_t dma_chan)
 {
-    gdma_pair_t *pair = NULL;
-    gdma_group_t *group = NULL;
     ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
     ESP_RETURN_ON_FALSE_ISR(dma_chan->flags.start_stop_by_etm == false, ESP_ERR_INVALID_STATE, TAG, "channel is controlled by ETM");
-    pair = dma_chan->pair;
-    group = pair->group;
+    gdma_pair_t *pair = dma_chan->pair;
+    gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
 
     portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
-    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
-        gdma_ll_rx_stop(group->hal.dev, pair->pair_id);
-    } else {
-        gdma_ll_tx_stop(group->hal.dev, pair->pair_id);
-    }
+    gdma_hal_stop(hal, pair->pair_id, dma_chan->direction);
     portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);
 
     return ESP_OK;
@@ -506,44 +500,30 @@ esp_err_t gdma_stop(gdma_channel_handle_t dma_chan)
 
 esp_err_t gdma_append(gdma_channel_handle_t dma_chan)
 {
-    esp_err_t ret = ESP_OK;
-    gdma_pair_t *pair = NULL;
-    gdma_group_t *group = NULL;
-    ESP_GOTO_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
-    pair = dma_chan->pair;
-    group = pair->group;
+    ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
+    gdma_pair_t *pair = dma_chan->pair;
+    gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
 
     portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
-    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
-        gdma_ll_rx_restart(group->hal.dev, pair->pair_id);
-    } else {
-        gdma_ll_tx_restart(group->hal.dev, pair->pair_id);
-    }
+    gdma_hal_append(hal, pair->pair_id, dma_chan->direction);
     portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);
 
-err:
-    return ret;
+    return ESP_OK;
 }
 
 esp_err_t gdma_reset(gdma_channel_handle_t dma_chan)
 {
-    esp_err_t ret = ESP_OK;
-    gdma_pair_t *pair = NULL;
-    gdma_group_t *group = NULL;
-    ESP_GOTO_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
-    pair = dma_chan->pair;
-    group = pair->group;
+    ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
+    gdma_pair_t *pair = dma_chan->pair;
+    gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
 
     portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
-    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
-        gdma_ll_rx_reset_channel(group->hal.dev, pair->pair_id);
-    } else {
-        gdma_ll_tx_reset_channel(group->hal.dev, pair->pair_id);
-    }
+    gdma_hal_reset(hal, pair->pair_id, dma_chan->direction);
     portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);
 
-err:
-    return ret;
+    return ESP_OK;
 }
 
 static void gdma_release_group_handle(gdma_group_t *group)
@@ -556,19 +536,20 @@ static void gdma_release_group_handle(gdma_group_t *group)
     if (s_platform.group_ref_counts[group_id] == 0) {
         assert(s_platform.groups[group_id]);
         do_deinitialize = true;
-        s_platform.groups[group_id] = NULL; // deregister from platfrom
-        gdma_ll_enable_clock(group->hal.dev, false);
-        periph_module_disable(gdma_periph_signals.groups[group_id].module);
+        // deregister from the platform
+        s_platform.groups[group_id] = NULL;
     }
     portEXIT_CRITICAL(&s_platform.spinlock);
 
     if (do_deinitialize) {
+        gdma_hal_deinit(&group->hal);
+        periph_module_disable(gdma_periph_signals.groups[group_id].module);
         free(group);
         ESP_LOGD(TAG, "del group %d", group_id);
     }
 }
 
-static gdma_group_t *gdma_acquire_group_handle(int group_id)
+static gdma_group_t *gdma_acquire_group_handle(int group_id, void (*hal_init)(gdma_hal_context_t *hal, const gdma_hal_config_t *config))
 {
     bool new_group = false;
     gdma_group_t *group = NULL;
@@ -576,16 +557,12 @@ static gdma_group_t *gdma_acquire_group_handle(int group_id)
     if (!pre_alloc_group) {
         goto out;
     }
+
     portENTER_CRITICAL(&s_platform.spinlock);
     if (!s_platform.groups[group_id]) {
         new_group = true;
         group = pre_alloc_group;
         s_platform.groups[group_id] = group; // register to platform
-        group->group_id = group_id;
-        group->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
-        periph_module_enable(gdma_periph_signals.groups[group_id].module); // enable APB to access GDMA registers
-        gdma_hal_init(&group->hal, group_id);       // initialize HAL context
-        gdma_ll_enable_clock(group->hal.dev, true); // enable gdma clock
     } else {
         group = s_platform.groups[group_id];
     }
@@ -594,7 +571,15 @@ static gdma_group_t *gdma_acquire_group_handle(int group_id)
     portEXIT_CRITICAL(&s_platform.spinlock);
 
     if (new_group) {
-        ESP_LOGD(TAG, "new group (%d) at %p", group->group_id, group);
+        group->group_id = group_id;
+        group->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
+        // enable APB to access GDMA registers
+        periph_module_enable(gdma_periph_signals.groups[group_id].module);
+        gdma_hal_config_t config = {
+            .group_id = group_id,
+        };
+        hal_init(&group->hal, &config);
+        ESP_LOGD(TAG, "new group (%d) at %p", group_id, group);
     } else {
         free(pre_alloc_group);
     }
@@ -632,14 +617,13 @@ static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id)
     if (!pre_alloc_pair) {
         goto out;
     }
+
     portENTER_CRITICAL(&group->spinlock);
     if (!group->pairs[pair_id]) {
         new_pair = true;
         pair = pre_alloc_pair;
-        group->pairs[pair_id] = pair; // register to group
-        pair->group = group;
-        pair->pair_id = pair_id;
-        pair->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
+        // register the pair to the group
+        group->pairs[pair_id] = pair;
     } else {
         pair = group->pairs[pair_id];
     }
@@ -648,10 +632,16 @@ static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id)
     portEXIT_CRITICAL(&group->spinlock);
 
     if (new_pair) {
+        pair->group = group;
+        pair->pair_id = pair_id;
+        pair->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
+
         portENTER_CRITICAL(&s_platform.spinlock);
-        s_platform.group_ref_counts[group->group_id]++; // pair obtains a reference to group
+        // pair obtains a reference to group, so increase it
+        s_platform.group_ref_counts[group->group_id]++;
         portEXIT_CRITICAL(&s_platform.spinlock);
-        ESP_LOGD(TAG, "new pair (%d,%d) at %p", group->group_id, pair->pair_id, pair);
+
+        ESP_LOGD(TAG, "new pair (%d,%d) at %p", group->group_id, pair_id, pair);
     } else {
         free(pre_alloc_pair);
     }
@@ -663,6 +653,7 @@ static esp_err_t gdma_del_tx_channel(gdma_channel_t *dma_channel)
 {
     gdma_pair_t *pair = dma_channel->pair;
     gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
     int pair_id = pair->pair_id;
     int group_id = group->group_id;
     gdma_tx_channel_t *tx_chan = __containerof(dma_channel, gdma_tx_channel_t, base);
@@ -674,14 +665,12 @@ static esp_err_t gdma_del_tx_channel(gdma_channel_t *dma_channel)
     if (dma_channel->intr) {
         esp_intr_free(dma_channel->intr);
         portENTER_CRITICAL(&pair->spinlock);
-        gdma_ll_tx_enable_interrupt(group->hal.dev, pair_id, UINT32_MAX, false); // disable all interupt events
-        gdma_ll_tx_clear_interrupt_status(group->hal.dev, pair_id, UINT32_MAX);  // clear all pending events
+        gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX, false); // disable all interrupt events
+        gdma_hal_clear_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX); // clear all pending events
         portEXIT_CRITICAL(&pair->spinlock);
         ESP_LOGD(TAG, "uninstall interrupt service for tx channel (%d,%d)", group_id, pair_id);
     }
 
-    gdma_ll_tx_set_priority(group->hal.dev, pair_id, 0); // reset the priority to 0 (lowest)
-
     free(tx_chan);
     ESP_LOGD(TAG, "del tx channel (%d,%d)", group_id, pair_id);
     // channel has a reference on pair, release it now
@@ -693,6 +682,7 @@ static esp_err_t gdma_del_rx_channel(gdma_channel_t *dma_channel)
 {
     gdma_pair_t *pair = dma_channel->pair;
     gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
     int pair_id = pair->pair_id;
     int group_id = group->group_id;
     gdma_rx_channel_t *rx_chan = __containerof(dma_channel, gdma_rx_channel_t, base);
@@ -704,32 +694,32 @@ static esp_err_t gdma_del_rx_channel(gdma_channel_t *dma_channel)
     if (dma_channel->intr) {
         esp_intr_free(dma_channel->intr);
         portENTER_CRITICAL(&pair->spinlock);
-        gdma_ll_rx_enable_interrupt(group->hal.dev, pair_id, UINT32_MAX, false); // disable all interupt events
-        gdma_ll_rx_clear_interrupt_status(group->hal.dev, pair_id, UINT32_MAX);  // clear all pending events
+        gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX, false); // disable all interrupt events
+        gdma_hal_clear_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX); // clear all pending events
         portEXIT_CRITICAL(&pair->spinlock);
         ESP_LOGD(TAG, "uninstall interrupt service for rx channel (%d,%d)", group_id, pair_id);
     }
 
-    gdma_ll_rx_set_priority(group->hal.dev, pair_id, 0); // reset the priority to 0 (lowest)
-
     free(rx_chan);
     ESP_LOGD(TAG, "del rx channel (%d,%d)", group_id, pair_id);
     gdma_release_pair_handle(pair);
     return ESP_OK;
 }
 
-static void IRAM_ATTR gdma_default_rx_isr(void *args)
+void gdma_default_rx_isr(void *args)
 {
     gdma_rx_channel_t *rx_chan = (gdma_rx_channel_t *)args;
     gdma_pair_t *pair = rx_chan->base.pair;
     gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
+    int pair_id = pair->pair_id;
     bool need_yield = false;
     // clear pending interrupt event
-    uint32_t intr_status = gdma_ll_rx_get_interrupt_status(group->hal.dev, pair->pair_id);
-    gdma_ll_rx_clear_interrupt_status(group->hal.dev, pair->pair_id, intr_status);
+    uint32_t intr_status = gdma_hal_read_intr_status(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX);
+    gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, intr_status);
 
     if ((intr_status & GDMA_LL_EVENT_RX_SUC_EOF) && rx_chan->cbs.on_recv_eof) {
-        uint32_t eof_addr = gdma_ll_rx_get_success_eof_desc_addr(group->hal.dev, pair->pair_id);
+        uint32_t eof_addr = gdma_hal_get_eof_desc_addr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX);
         gdma_event_data_t edata = {
             .rx_eof_desc_addr = eof_addr
         };
@@ -755,18 +745,20 @@ static void IRAM_ATTR gdma_default_rx_isr(void *args)
     }
 }
 
-static void IRAM_ATTR gdma_default_tx_isr(void *args)
+void gdma_default_tx_isr(void *args)
 {
     gdma_tx_channel_t *tx_chan = (gdma_tx_channel_t *)args;
     gdma_pair_t *pair = tx_chan->base.pair;
     gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
+    int pair_id = pair->pair_id;
     bool need_yield = false;
     // clear pending interrupt event
-    uint32_t intr_status = gdma_ll_tx_get_interrupt_status(group->hal.dev, pair->pair_id);
-    gdma_ll_tx_clear_interrupt_status(group->hal.dev, pair->pair_id, intr_status);
+    uint32_t intr_status = gdma_hal_read_intr_status(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX);
+    gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, intr_status);
 
     if ((intr_status & GDMA_LL_EVENT_TX_EOF) && tx_chan->cbs.on_trans_eof) {
-        uint32_t eof_addr = gdma_ll_tx_get_eof_desc_addr(group->hal.dev, pair->pair_id);
+        uint32_t eof_addr = gdma_hal_get_eof_desc_addr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX);
         gdma_event_data_t edata = {
             .tx_eof_desc_addr = eof_addr
         };
@@ -785,23 +777,25 @@ static esp_err_t gdma_install_rx_interrupt(gdma_rx_channel_t *rx_chan)
     esp_err_t ret = ESP_OK;
     gdma_pair_t *pair = rx_chan->base.pair;
     gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
+    int pair_id = pair->pair_id;
     // pre-alloc a interrupt handle, with handler disabled
     int isr_flags = GDMA_INTR_ALLOC_FLAGS;
-#if SOC_GDMA_TX_RX_SHARE_INTERRUPT
+#if GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT
     isr_flags |= ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_LOWMED;
 #endif
     intr_handle_t intr = NULL;
-    ret = esp_intr_alloc_intrstatus(gdma_periph_signals.groups[group->group_id].pairs[pair->pair_id].rx_irq_id, isr_flags,
-                                    (uint32_t)gdma_ll_rx_get_interrupt_status_reg(group->hal.dev, pair->pair_id), GDMA_LL_RX_EVENT_MASK,
+    ret = esp_intr_alloc_intrstatus(gdma_periph_signals.groups[group->group_id].pairs[pair_id].rx_irq_id, isr_flags,
+                                    gdma_hal_get_intr_status_reg(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX), GDMA_LL_RX_EVENT_MASK,
                                     gdma_default_rx_isr, rx_chan, &intr);
     ESP_GOTO_ON_ERROR(ret, err, TAG, "alloc interrupt failed");
     rx_chan->base.intr = intr;
 
     portENTER_CRITICAL(&pair->spinlock);
-    gdma_ll_rx_enable_interrupt(group->hal.dev, pair->pair_id, UINT32_MAX, false); // disable all interupt events
-    gdma_ll_rx_clear_interrupt_status(group->hal.dev, pair->pair_id, UINT32_MAX);  // clear all pending events
+    gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX, false); // disable all interrupt events
+    gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX); // clear all pending events
     portEXIT_CRITICAL(&pair->spinlock);
-    ESP_LOGD(TAG, "install interrupt service for rx channel (%d,%d)", group->group_id, pair->pair_id);
+    ESP_LOGD(TAG, "install interrupt service for rx channel (%d,%d)", group->group_id, pair_id);
 
 err:
     return ret;
@@ -812,23 +806,25 @@ static esp_err_t gdma_install_tx_interrupt(gdma_tx_channel_t *tx_chan)
     esp_err_t ret = ESP_OK;
     gdma_pair_t *pair = tx_chan->base.pair;
     gdma_group_t *group = pair->group;
+    gdma_hal_context_t *hal = &group->hal;
+    int pair_id = pair->pair_id;
     // pre-alloc a interrupt handle, with handler disabled
     int isr_flags = GDMA_INTR_ALLOC_FLAGS;
-#if SOC_GDMA_TX_RX_SHARE_INTERRUPT
+#if GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT
     isr_flags |= ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_LOWMED;
 #endif
     intr_handle_t intr = NULL;
-    ret = esp_intr_alloc_intrstatus(gdma_periph_signals.groups[group->group_id].pairs[pair->pair_id].tx_irq_id, isr_flags,
-                                    (uint32_t)gdma_ll_tx_get_interrupt_status_reg(group->hal.dev, pair->pair_id), GDMA_LL_TX_EVENT_MASK,
+    ret = esp_intr_alloc_intrstatus(gdma_periph_signals.groups[group->group_id].pairs[pair_id].tx_irq_id, isr_flags,
+                                    gdma_hal_get_intr_status_reg(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX), GDMA_LL_TX_EVENT_MASK,
                                     gdma_default_tx_isr, tx_chan, &intr);
     ESP_GOTO_ON_ERROR(ret, err, TAG, "alloc interrupt failed");
     tx_chan->base.intr = intr;
 
     portENTER_CRITICAL(&pair->spinlock);
-    gdma_ll_tx_enable_interrupt(group->hal.dev, pair->pair_id, UINT32_MAX, false); // disable all interupt events
-    gdma_ll_tx_clear_interrupt_status(group->hal.dev, pair->pair_id, UINT32_MAX);  // clear all pending events
+    gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX, false); // disable all interrupt events
+    gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX); // clear all pending events
     portEXIT_CRITICAL(&pair->spinlock);
-    ESP_LOGD(TAG, "install interrupt service for tx channel (%d,%d)", group->group_id, pair->pair_id);
+    ESP_LOGD(TAG, "install interrupt service for tx channel (%d,%d)", group->group_id, pair_id);
 
 err:
     return ret;

+ 6 - 3
components/esp_hw_support/dma/gdma_priv.h

@@ -15,6 +15,8 @@
 #include "soc/soc_caps.h"
 #include "hal/gdma_hal.h"
 #include "hal/gdma_ll.h"
+#include "hal/gdma_hal_ahb.h"
+#include "hal/gdma_hal_axi.h"
 #include "soc/gdma_periph.h"
 #include "esp_private/gdma.h"
 
@@ -40,13 +42,14 @@ typedef struct gdma_tx_channel_t gdma_tx_channel_t;
 typedef struct gdma_rx_channel_t gdma_rx_channel_t;
 
 typedef struct gdma_group_t {
-    int group_id;           // Group ID, index from 0
+    int group_id; // Group ID, index from 0
+    int bus_id;   // which system bus the GDMA instance is attached to
     gdma_hal_context_t hal; // HAL instance is at group level
     portMUX_TYPE spinlock;  // group level spinlock
     uint32_t tx_periph_in_use_mask; // each bit indicates which peripheral (TX direction) has been occupied
     uint32_t rx_periph_in_use_mask; // each bit indicates which peripheral (RX direction) has been occupied
-    gdma_pair_t *pairs[SOC_GDMA_PAIRS_PER_GROUP];  // handles of GDMA pairs
-    int pair_ref_counts[SOC_GDMA_PAIRS_PER_GROUP]; // reference count used to protect pair install/uninstall
+    gdma_pair_t *pairs[SOC_GDMA_PAIRS_PER_GROUP_MAX];  // handles of GDMA pairs
+    int pair_ref_counts[SOC_GDMA_PAIRS_PER_GROUP_MAX]; // reference count used to protect pair install/uninstall
 } gdma_group_t;
 
 struct gdma_pair_t {

+ 69 - 0
components/esp_hw_support/dma/linker.lf

@@ -0,0 +1,69 @@
+[mapping:gdma_driver]
+archive: libesp_hw_support.a
+entries:
+    # performance optimization, always put the DMA default interrupt handler in IRAM
+    if SOC_GDMA_SUPPORTED = y:
+        gdma: gdma_default_tx_isr (noflash)
+        gdma: gdma_default_rx_isr (noflash)
+
+    # put GDMA control functions in IRAM
+    if GDMA_CTRL_FUNC_IN_IRAM = y:
+        gdma: gdma_start (noflash)
+        gdma: gdma_stop (noflash)
+        gdma: gdma_append (noflash)
+        gdma: gdma_reset (noflash)
+
+[mapping:gdma_hal]
+archive: libhal.a
+entries:
+    # performance optimization, always put the DMA default interrupt handler in IRAM
+    if SOC_GDMA_SUPPORTED = y:
+        gdma_hal_top: gdma_hal_clear_intr (noflash)
+        gdma_hal_top: gdma_hal_read_intr_status (noflash)
+        gdma_hal_top: gdma_hal_get_eof_desc_addr (noflash)
+
+    # GDMA implementation layer for AHB-DMA version 1
+    if SOC_AHB_GDMA_VERSION = 1:
+        gdma_hal_ahb_v1: gdma_ahb_hal_clear_intr (noflash)
+        gdma_hal_ahb_v1: gdma_ahb_hal_read_intr_status (noflash)
+        gdma_hal_ahb_v1: gdma_ahb_hal_get_eof_desc_addr (noflash)
+
+    # GDMA implementation layer for AHB-DMA version 2
+    if SOC_AHB_GDMA_VERSION = 2:
+        gdma_hal_ahb_v2: gdma_ahb_hal_clear_intr (noflash)
+        gdma_hal_ahb_v2: gdma_ahb_hal_read_intr_status (noflash)
+        gdma_hal_ahb_v2: gdma_ahb_hal_get_eof_desc_addr (noflash)
+
+    # GDMA implementation layer for AXI-DMA
+    if SOC_AXI_GDMA_SUPPORTED = y:
+        gdma_hal_axi: gdma_axi_hal_clear_intr (noflash)
+        gdma_hal_axi: gdma_axi_hal_read_intr_status (noflash)
+        gdma_hal_axi: gdma_axi_hal_get_eof_desc_addr (noflash)
+
+    # put GDMA control HAL functions in IRAM
+    if GDMA_CTRL_FUNC_IN_IRAM = y:
+        gdma_hal_top: gdma_hal_start_with_desc (noflash)
+        gdma_hal_top: gdma_hal_stop (noflash)
+        gdma_hal_top: gdma_hal_append (noflash)
+        gdma_hal_top: gdma_hal_reset (noflash)
+
+        # GDMA implementation layer for AHB-DMA version 1
+        if SOC_AHB_GDMA_VERSION = 1:
+            gdma_hal_ahb_v1: gdma_ahb_hal_start_with_desc (noflash)
+            gdma_hal_ahb_v1: gdma_ahb_hal_stop (noflash)
+            gdma_hal_ahb_v1: gdma_ahb_hal_append (noflash)
+            gdma_hal_ahb_v1: gdma_ahb_hal_reset (noflash)
+
+        # GDMA implementation layer for AHB-DMA version 2
+        if SOC_AHB_GDMA_VERSION = 2:
+            gdma_hal_ahb_v2: gdma_ahb_hal_start_with_desc (noflash)
+            gdma_hal_ahb_v2: gdma_ahb_hal_stop (noflash)
+            gdma_hal_ahb_v2: gdma_ahb_hal_append (noflash)
+            gdma_hal_ahb_v2: gdma_ahb_hal_reset (noflash)
+
+        # GDMA implementation layer for AXI-DMA
+        if SOC_AXI_GDMA_SUPPORTED = y:
+            gdma_hal_axi: gdma_axi_hal_start_with_desc (noflash)
+            gdma_hal_axi: gdma_axi_hal_stop (noflash)
+            gdma_hal_axi: gdma_axi_hal_append (noflash)
+            gdma_hal_axi: gdma_axi_hal_reset (noflash)

+ 25 - 5
components/esp_hw_support/include/esp_private/gdma.h

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -98,6 +98,7 @@ typedef struct {
 typedef struct {
     gdma_trigger_peripheral_t periph; /*!< Target peripheral which will trigger DMA operations */
     int instance_id;                  /*!< Peripheral instance ID. Supported IDs are listed in `soc/gdma_channel.h`, e.g. SOC_GDMA_TRIG_PERIPH_UHCI0 */
+    int bus_id;                       /*!< Which system bus the DMA should be attached to */
 } gdma_trigger_t;
 
 /**
@@ -107,7 +108,7 @@ typedef struct {
  *
  */
 #define GDMA_MAKE_TRIGGER(peri, id) \
-    (gdma_trigger_t) { .periph = peri, .instance_id = SOC_##peri##id }
+    (gdma_trigger_t) { .periph = peri, .instance_id = SOC_##peri##id, .bus_id = SOC_##peri##id##_BUS }
 
 /**
  * @brief A collection of strategy item that each GDMA channel could apply
@@ -118,20 +119,39 @@ typedef struct {
     bool auto_update_desc; /*!< If set / clear, DMA channel enables / disables hardware to update descriptor automatically (TX channel only) */
 } gdma_strategy_config_t;
 
+/** @cond */
+esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan);
+/** @endcond */
+
 /**
- * @brief Create GDMA channel
+ * @brief Create AHB-GDMA channel
  * @note This API won't install interrupt service for the allocated channel.
  *       If interrupt service is needed, user has to register GDMA event callback by `gdma_register_tx_event_callbacks` or `gdma_register_rx_event_callbacks`.
  *
  * @param[in] config Pointer to a collection of configurations for allocating GDMA channel
- * @param[out] ret_chan Returnned channel handle
+ * @param[out] ret_chan Returned channel handle
  * @return
  *      - ESP_OK: Create DMA channel successfully
  *      - ESP_ERR_INVALID_ARG: Create DMA channel failed because of invalid argument
  *      - ESP_ERR_NO_MEM: Create DMA channel failed because out of memory
  *      - ESP_FAIL: Create DMA channel failed because of other error
  */
-esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan);
+esp_err_t gdma_new_ahb_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan);
+
+/**
+ * @brief Create AXI-GDMA channel
+ * @note This API won't install interrupt service for the allocated channel.
+ *       If interrupt service is needed, user has to register GDMA event callback by `gdma_register_tx_event_callbacks` or `gdma_register_rx_event_callbacks`.
+ *
+ * @param[in] config Pointer to a collection of configurations for allocating GDMA channel
+ * @param[out] ret_chan Returned channel handle
+ * @return
+ *      - ESP_OK: Create DMA channel successfully
+ *      - ESP_ERR_INVALID_ARG: Create DMA channel failed because of invalid argument
+ *      - ESP_ERR_NO_MEM: Create DMA channel failed because out of memory
+ *      - ESP_FAIL: Create DMA channel failed because of other error
+ */
+esp_err_t gdma_new_axi_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan);
 
 /**
  * @brief Connect GDMA channel to trigger peripheral

+ 0 - 5
components/esp_hw_support/linker.lf

@@ -31,11 +31,6 @@ entries:
     if PERIPH_CTRL_FUNC_IN_IRAM = y && ESP_WIFI_ENABLED = y:
         periph_ctrl: wifi_module_enable (noflash)
         periph_ctrl: wifi_module_disable (noflash)
-    if GDMA_CTRL_FUNC_IN_IRAM = y:
-        gdma: gdma_start (noflash)
-        gdma: gdma_stop (noflash)
-        gdma: gdma_append (noflash)
-        gdma: gdma_reset (noflash)
     if SOC_SYSTIMER_SUPPORTED = y:
         systimer (noflash)
     if APP_BUILD_TYPE_PURE_RAM_APP = n:

+ 2 - 2
components/esp_hw_support/test_apps/dma/main/test_async_memcpy.c

@@ -47,7 +47,7 @@ static void async_memcpy_setup_testbench(memcpy_testbench_context_t *test_contex
     uint8_t *dst_buf = NULL;
     uint8_t *from_addr = NULL;
     uint8_t *to_addr = NULL;
-#if CONFIG_SPIRAM && SOC_GDMA_SUPPORT_PSRAM
+#if CONFIG_SPIRAM && SOC_AHB_GDMA_SUPPORT_PSRAM
     if (test_context->src_in_psram) {
         src_buf = heap_caps_malloc(buffer_size, MALLOC_CAP_SPIRAM);
     } else {
@@ -249,7 +249,7 @@ static void memcpy_performance_test(uint32_t buffer_size)
     IDF_LOG_PERFORMANCE("CPU_COPY", "%.2f MB/s, dir: SRAM->SRAM, size: %zu Bytes", throughput, test_context.buffer_size);
     async_memcpy_verify_and_clear_testbench(test_context.seed, test_context.buffer_size, test_context.src_buf, test_context.dst_buf, test_context.from_addr, test_context.to_addr);
 
-#if CONFIG_SPIRAM && SOC_GDMA_SUPPORT_PSRAM
+#if CONFIG_SPIRAM && SOC_AHB_GDMA_SUPPORT_PSRAM
     // 2. PSRAM->PSRAM
     test_context.src_in_psram = true;
     test_context.dst_in_psram = true;

+ 10 - 9
components/esp_hw_support/test_apps/dma/main/test_gdma.c

@@ -1,47 +1,48 @@
 /*
- * SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
 #include "unity.h"
 #include "esp_private/gdma.h"
 #include "soc/soc_caps.h"
+#include "hal/gdma_ll.h"
 
-TEST_CASE("GDMA channel allocation", "[gdma]")
+TEST_CASE("AHB GDMA channel allocation", "[gdma]")
 {
     gdma_channel_alloc_config_t channel_config = {};
-    gdma_channel_handle_t tx_channels[SOC_GDMA_PAIRS_PER_GROUP] = {};
-    gdma_channel_handle_t rx_channels[SOC_GDMA_PAIRS_PER_GROUP] = {};
+    gdma_channel_handle_t tx_channels[GDMA_LL_AHB_PAIRS_PER_GROUP] = {};
+    gdma_channel_handle_t rx_channels[GDMA_LL_AHB_PAIRS_PER_GROUP] = {};
     channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
     gdma_tx_event_callbacks_t tx_cbs = {};
     gdma_rx_event_callbacks_t rx_cbs = {};
 
     // install TX channels
-    for (int i = 0; i < SOC_GDMA_PAIRS_PER_GROUP; i++) {
+    for (int i = 0; i < GDMA_LL_AHB_PAIRS_PER_GROUP; i++) {
         TEST_ESP_OK(gdma_new_channel(&channel_config, &tx_channels[i]));
         TEST_ESP_OK(gdma_register_tx_event_callbacks(tx_channels[i], &tx_cbs, NULL));
     };
     TEST_ASSERT_EQUAL(ESP_ERR_NOT_FOUND, gdma_new_channel(&channel_config, &tx_channels[0]));
 
     // Free interrupts before installing RX interrupts to ensure enough free interrupts
-    for (int i = 0; i < SOC_GDMA_PAIRS_PER_GROUP; i++) {
+    for (int i = 0; i < GDMA_LL_AHB_PAIRS_PER_GROUP; i++) {
         TEST_ESP_OK(gdma_del_channel(tx_channels[i]));
     }
 
     // install RX channels
     channel_config.direction = GDMA_CHANNEL_DIRECTION_RX;
-    for (int i = 0; i < SOC_GDMA_PAIRS_PER_GROUP; i++) {
+    for (int i = 0; i < GDMA_LL_AHB_PAIRS_PER_GROUP; i++) {
         TEST_ESP_OK(gdma_new_channel(&channel_config, &rx_channels[i]));
         TEST_ESP_OK(gdma_register_rx_event_callbacks(rx_channels[i], &rx_cbs, NULL));
     }
     TEST_ASSERT_EQUAL(ESP_ERR_NOT_FOUND, gdma_new_channel(&channel_config, &rx_channels[0]));
 
-    for (int i = 0; i < SOC_GDMA_PAIRS_PER_GROUP; i++) {
+    for (int i = 0; i < GDMA_LL_AHB_PAIRS_PER_GROUP; i++) {
         TEST_ESP_OK(gdma_del_channel(rx_channels[i]));
     }
 
     // install single and paired TX/RX channels
-#if SOC_GDMA_PAIRS_PER_GROUP >= 2
+#if GDMA_LL_AHB_PAIRS_PER_GROUP >= 2
     // single tx channel
     channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
     TEST_ESP_OK(gdma_new_channel(&channel_config, &tx_channels[0]));

+ 13 - 1
components/hal/CMakeLists.txt

@@ -88,7 +88,19 @@ if(NOT BOOTLOADER_BUILD)
     endif()
 
     if(CONFIG_SOC_GDMA_SUPPORTED)
-        list(APPEND srcs "gdma_hal.c")
+        list(APPEND srcs "gdma_hal_top.c")
+    endif()
+
+    if(CONFIG_SOC_AHB_GDMA_VERSION EQUAL 1)
+        list(APPEND srcs "gdma_hal_ahb_v1.c")
+    endif()
+
+    if(CONFIG_SOC_AHB_GDMA_VERSION EQUAL 2)
+        list(APPEND srcs "gdma_hal_ahb_v2.c")
+    endif()
+
+    if(CONFIG_SOC_AXI_GDMA_SUPPORTED)
+        list(APPEND srcs "gdma_hal_axi.c")
     endif()
 
     if(CONFIG_SOC_I2S_SUPPORTED)

+ 1 - 1
components/hal/adc_hal.c

@@ -27,7 +27,7 @@
 /*---------------------------------------------------------------
             Define all ADC DMA required operations here
 ---------------------------------------------------------------*/
-#if SOC_GDMA_SUPPORTED
+#if SOC_AHB_GDMA_VERSION == 1
 #define adc_dma_ll_rx_clear_intr(dev, chan, mask)       gdma_ll_rx_clear_interrupt_status(dev, chan, mask)
 #define adc_dma_ll_rx_enable_intr(dev, chan, mask)      gdma_ll_rx_enable_interrupt(dev, chan, mask, true)
 #define adc_dma_ll_rx_disable_intr(dev, chan, mask)     gdma_ll_rx_enable_interrupt(dev, chan, mask, false)

+ 8 - 3
components/hal/esp32c2/include/hal/gdma_ll.h

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -41,11 +41,16 @@ extern "C" {
 #define GDMA_LL_EVENT_RX_SUC_EOF    (1<<1)
 #define GDMA_LL_EVENT_RX_DONE       (1<<0)
 
+#define GDMA_LL_AHB_GROUP_START_ID    0       // AHB GDMA group ID starts from 0
+#define GDMA_LL_AHB_NUM_GROUPS        1       // Number of AHB GDMA groups
+#define GDMA_LL_AHB_PAIRS_PER_GROUP   1       // Number of GDMA pairs in each AHB group
+#define GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT  1  // TX and RX channel in the same pair will share the same interrupt source number
+
 ///////////////////////////////////// Common /////////////////////////////////////////
 /**
- * @brief Enable DMA clock gating
+ * @brief Force enable register clock
  */
-static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
+static inline void gdma_ll_force_enable_reg_clock(gdma_dev_t *dev, bool enable)
 {
     dev->misc_conf.clk_en = enable;
 }

+ 8 - 3
components/hal/esp32c3/include/hal/gdma_ll.h

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -41,11 +41,16 @@ extern "C" {
 #define GDMA_LL_EVENT_RX_SUC_EOF    (1<<1)
 #define GDMA_LL_EVENT_RX_DONE       (1<<0)
 
+#define GDMA_LL_AHB_GROUP_START_ID         0 // AHB GDMA group ID starts from 0
+#define GDMA_LL_AHB_NUM_GROUPS             1 // Number of AHB GDMA groups
+#define GDMA_LL_AHB_PAIRS_PER_GROUP        3 // Number of GDMA pairs in each AHB group
+#define GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT  1  // TX and RX channel in the same pair will share the same interrupt source number
+
 ///////////////////////////////////// Common /////////////////////////////////////////
 /**
- * @brief Enable DMA clock gating
+ * @brief Force enable register clock
  */
-static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
+static inline void gdma_ll_force_enable_reg_clock(gdma_dev_t *dev, bool enable)
 {
     dev->misc_conf.clk_en = enable;
 }

+ 14 - 10
components/hal/esp32c6/include/hal/gdma_ll.h

@@ -42,6 +42,10 @@ extern "C" {
 #define GDMA_LL_EVENT_RX_SUC_EOF    (1<<1)
 #define GDMA_LL_EVENT_RX_DONE       (1<<0)
 
+#define GDMA_LL_AHB_GROUP_START_ID    0 // AHB GDMA group ID starts from 0
+#define GDMA_LL_AHB_NUM_GROUPS        1 // Number of AHB GDMA groups
+#define GDMA_LL_AHB_PAIRS_PER_GROUP   3 // Number of GDMA pairs in each AHB group
+
 #define GDMA_LL_TX_ETM_EVENT_TABLE(group, chan, event)                                     \
     (uint32_t[1][3][GDMA_ETM_EVENT_MAX]){{{                                                \
                                               [GDMA_ETM_EVENT_EOF] = GDMA_EVT_OUT_EOF_CH0, \
@@ -88,9 +92,9 @@ extern "C" {
 
 ///////////////////////////////////// Common /////////////////////////////////////////
 /**
- * @brief Enable DMA clock gating
+ * @brief Force enable register clock
  */
-static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
+static inline void gdma_ll_force_enable_reg_clock(gdma_dev_t *dev, bool enable)
 {
     dev->misc_conf.clk_en = enable;
 }
@@ -102,7 +106,7 @@ static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
 __attribute__((always_inline))
 static inline uint32_t gdma_ll_rx_get_interrupt_status(gdma_dev_t *dev, uint32_t channel)
 {
-    return dev->in_intr[channel].st.val & GDMA_LL_RX_EVENT_MASK;
+    return dev->in_intr[channel].st.val;
 }
 
 /**
@@ -111,9 +115,9 @@ static inline uint32_t gdma_ll_rx_get_interrupt_status(gdma_dev_t *dev, uint32_t
 static inline void gdma_ll_rx_enable_interrupt(gdma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
 {
     if (enable) {
-        dev->in_intr[channel].ena.val |= (mask & GDMA_LL_RX_EVENT_MASK);
+        dev->in_intr[channel].ena.val |= mask;
     } else {
-        dev->in_intr[channel].ena.val &= ~(mask & GDMA_LL_RX_EVENT_MASK);
+        dev->in_intr[channel].ena.val &= ~mask;
     }
 }
 
@@ -123,7 +127,7 @@ static inline void gdma_ll_rx_enable_interrupt(gdma_dev_t *dev, uint32_t channel
 __attribute__((always_inline))
 static inline void gdma_ll_rx_clear_interrupt_status(gdma_dev_t *dev, uint32_t channel, uint32_t mask)
 {
-    dev->in_intr[channel].clr.val = (mask & GDMA_LL_RX_EVENT_MASK);
+    dev->in_intr[channel].clr.val = mask;
 }
 
 /**
@@ -326,7 +330,7 @@ static inline void gdma_ll_rx_enable_etm_task(gdma_dev_t *dev, uint32_t channel,
 __attribute__((always_inline))
 static inline uint32_t gdma_ll_tx_get_interrupt_status(gdma_dev_t *dev, uint32_t channel)
 {
-    return dev->out_intr[channel].st.val & GDMA_LL_TX_EVENT_MASK;
+    return dev->out_intr[channel].st.val;
 }
 
 /**
@@ -335,9 +339,9 @@ static inline uint32_t gdma_ll_tx_get_interrupt_status(gdma_dev_t *dev, uint32_t
 static inline void gdma_ll_tx_enable_interrupt(gdma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
 {
     if (enable) {
-        dev->out_intr[channel].ena.val |= (mask & GDMA_LL_TX_EVENT_MASK);
+        dev->out_intr[channel].ena.val |= mask;
     } else {
-        dev->out_intr[channel].ena.val &= ~(mask & GDMA_LL_TX_EVENT_MASK);
+        dev->out_intr[channel].ena.val &= ~mask;
     }
 }
 
@@ -347,7 +351,7 @@ static inline void gdma_ll_tx_enable_interrupt(gdma_dev_t *dev, uint32_t channel
 __attribute__((always_inline))
 static inline void gdma_ll_tx_clear_interrupt_status(gdma_dev_t *dev, uint32_t channel, uint32_t mask)
 {
-    dev->out_intr[channel].clr.val = (mask & GDMA_LL_TX_EVENT_MASK);
+    dev->out_intr[channel].clr.val = mask;
 }
 
 /**

+ 14 - 10
components/hal/esp32h2/include/hal/gdma_ll.h

@@ -42,6 +42,10 @@ extern "C" {
 #define GDMA_LL_EVENT_RX_SUC_EOF    (1<<1)
 #define GDMA_LL_EVENT_RX_DONE       (1<<0)
 
+#define GDMA_LL_AHB_GROUP_START_ID    0 // AHB GDMA group ID starts from 0
+#define GDMA_LL_AHB_NUM_GROUPS        1 // Number of AHB GDMA groups
+#define GDMA_LL_AHB_PAIRS_PER_GROUP   3 // Number of GDMA pairs in each AHB group
+
 #define GDMA_LL_TX_ETM_EVENT_TABLE(group, chan, event)                                     \
     (uint32_t[1][3][GDMA_ETM_EVENT_MAX]){{{                                                \
                                               [GDMA_ETM_EVENT_EOF] = GDMA_EVT_OUT_EOF_CH0, \
@@ -88,9 +92,9 @@ extern "C" {
 
 ///////////////////////////////////// Common /////////////////////////////////////////
 /**
- * @brief Enable DMA clock gating
+ * @brief Force enable register clock
  */
-static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
+static inline void gdma_ll_force_enable_reg_clock(gdma_dev_t *dev, bool enable)
 {
     dev->misc_conf.clk_en = enable;
 }
@@ -102,7 +106,7 @@ static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
 __attribute__((always_inline))
 static inline uint32_t gdma_ll_rx_get_interrupt_status(gdma_dev_t *dev, uint32_t channel)
 {
-    return dev->in_intr[channel].st.val & GDMA_LL_RX_EVENT_MASK;
+    return dev->in_intr[channel].st.val;
 }
 
 /**
@@ -111,9 +115,9 @@ static inline uint32_t gdma_ll_rx_get_interrupt_status(gdma_dev_t *dev, uint32_t
 static inline void gdma_ll_rx_enable_interrupt(gdma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
 {
     if (enable) {
-        dev->in_intr[channel].ena.val |= (mask & GDMA_LL_RX_EVENT_MASK);
+        dev->in_intr[channel].ena.val |= mask;
     } else {
-        dev->in_intr[channel].ena.val &= ~(mask & GDMA_LL_RX_EVENT_MASK);
+        dev->in_intr[channel].ena.val &= ~mask;
     }
 }
 
@@ -123,7 +127,7 @@ static inline void gdma_ll_rx_enable_interrupt(gdma_dev_t *dev, uint32_t channel
 __attribute__((always_inline))
 static inline void gdma_ll_rx_clear_interrupt_status(gdma_dev_t *dev, uint32_t channel, uint32_t mask)
 {
-    dev->in_intr[channel].clr.val = (mask & GDMA_LL_RX_EVENT_MASK);
+    dev->in_intr[channel].clr.val = mask;
 }
 
 /**
@@ -326,7 +330,7 @@ static inline void gdma_ll_rx_enable_etm_task(gdma_dev_t *dev, uint32_t channel,
 __attribute__((always_inline))
 static inline uint32_t gdma_ll_tx_get_interrupt_status(gdma_dev_t *dev, uint32_t channel)
 {
-    return dev->out_intr[channel].st.val & GDMA_LL_TX_EVENT_MASK;
+    return dev->out_intr[channel].st.val;
 }
 
 /**
@@ -335,9 +339,9 @@ static inline uint32_t gdma_ll_tx_get_interrupt_status(gdma_dev_t *dev, uint32_t
 static inline void gdma_ll_tx_enable_interrupt(gdma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
 {
     if (enable) {
-        dev->out_intr[channel].ena.val |= (mask & GDMA_LL_TX_EVENT_MASK);
+        dev->out_intr[channel].ena.val |= mask;
     } else {
-        dev->out_intr[channel].ena.val &= ~(mask & GDMA_LL_TX_EVENT_MASK);
+        dev->out_intr[channel].ena.val &= ~mask;
     }
 }
 
@@ -347,7 +351,7 @@ static inline void gdma_ll_tx_enable_interrupt(gdma_dev_t *dev, uint32_t channel
 __attribute__((always_inline))
 static inline void gdma_ll_tx_clear_interrupt_status(gdma_dev_t *dev, uint32_t channel, uint32_t mask)
 {
-    dev->out_intr[channel].clr.val = (mask & GDMA_LL_TX_EVENT_MASK);
+    dev->out_intr[channel].clr.val = mask;
 }
 
 /**

+ 505 - 0
components/hal/esp32p4/include/ahb_dma_ll.h

@@ -0,0 +1,505 @@
+/*
+ * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include "hal/gdma_types.h"
+#include "hal/gdma_ll.h"
+#include "soc/ahb_dma_struct.h"
+#include "soc/ahb_dma_reg.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define AHB_DMA_LL_GET_HW(id) (((id) == 0) ? (&AHB_DMA) : NULL)
+
+// Any "dummy" (unused) peripheral ID may be selected for M2M mode. NOTE(review): confirm this
+// free-ID bitmask (0xFAC2) against the ESP32-P4 TRM AHB-DMA peripheral-selection table — each
+// set bit should mark a peripheral ID that is unconnected and therefore safe to borrow for M2M.
+#define AHB_DMA_LL_M2M_FREE_PERIPH_ID_MASK (0xFAC2)
+#define AHB_DMA_LL_INVALID_PERIPH_ID       (0x3F)
+
+///////////////////////////////////// Common /////////////////////////////////////////
+/**
+ * @brief Force enable register clock
+ *
+ * @param dev DMA register base address
+ * @param enable True to keep the register clock always on, false to allow automatic clock gating
+ */
+static inline void ahb_dma_ll_force_enable_reg_clock(ahb_dma_dev_t *dev, bool enable)
+{
+    dev->misc_conf.clk_en = enable;
+}
+
+/**
+ * @brief Disable priority arbitration
+ *
+ * @param dev DMA register base address
+ * @param dis True to disable, false to enable
+ */
+static inline void ahb_dma_ll_disable_prio_arb(ahb_dma_dev_t *dev, bool dis)
+{
+    dev->misc_conf.arb_pri_dis = dis;
+}
+
+/**
+ * @brief Reset DMA FSM
+ *
+ * @param dev DMA register base address
+ */
+static inline void ahb_dma_ll_reset_fsm(ahb_dma_dev_t *dev)
+{
+    dev->misc_conf.ahbm_rst_inter = 1;
+    dev->misc_conf.ahbm_rst_inter = 0;
+}
+
+///////////////////////////////////// RX /////////////////////////////////////////
+/**
+ * @brief Get DMA RX channel interrupt status word
+ */
+__attribute__((always_inline))
+static inline uint32_t ahb_dma_ll_rx_get_interrupt_status(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->in_intr[channel].st.val;
+}
+
+/**
+ * @brief Enable or disable the DMA RX channel interrupts selected by mask
+ *
+ * @param dev DMA register base address
+ * @param channel RX channel number
+ * @param mask Bit mask of the interrupt events to update
+ * @param enable True to set the masked enable bits, false to clear them
+ */
+static inline void ahb_dma_ll_rx_enable_interrupt(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
+{
+    uint32_t ena_bits = dev->in_intr[channel].ena.val;
+    ena_bits = enable ? (ena_bits | mask) : (ena_bits & ~mask);
+    dev->in_intr[channel].ena.val = ena_bits;
+}
+
+/**
+ * @brief Clear DMA RX channel interrupt
+ */
+__attribute__((always_inline))
+static inline void ahb_dma_ll_rx_clear_interrupt_status(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mask)
+{
+    dev->in_intr[channel].clr.val = mask;
+}
+
+/**
+ * @brief Get DMA RX channel interrupt status register address
+ */
+static inline volatile void *ahb_dma_ll_rx_get_interrupt_status_reg(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    return (volatile void *)(&dev->in_intr[channel].st);
+}
+
+/**
+ * @brief Enable DMA RX channel to check the owner bit in the descriptor, disabled by default
+ */
+static inline void ahb_dma_ll_rx_enable_owner_check(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->channel[channel].in.in_conf1.in_check_owner_chn = enable;
+}
+
+/**
+ * @brief Enable DMA RX channel burst reading data, disabled by default
+ */
+static inline void ahb_dma_ll_rx_enable_data_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->channel[channel].in.in_conf0.in_data_burst_en_chn = enable;
+}
+
+/**
+ * @brief Enable DMA RX channel burst reading descriptor link, disabled by default
+ */
+static inline void ahb_dma_ll_rx_enable_descriptor_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->channel[channel].in.in_conf0.indscr_burst_en_chn = enable;
+}
+
+/**
+ * @brief Reset DMA RX channel FSM and FIFO pointer
+ */
+__attribute__((always_inline))
+static inline void ahb_dma_ll_rx_reset_channel(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    dev->channel[channel].in.in_conf0.in_rst_chn = 1;
+    dev->channel[channel].in.in_conf0.in_rst_chn = 0;
+}
+
+/**
+ * @brief Check if DMA RX FIFO is full
+ * @param fifo_level only supports level 1 (the parameter is accepted for API symmetry and not read here)
+ *
+ * @note Only bit0 of the status word is tested — presumably the L1 "FIFO full" flag;
+ *       NOTE(review): verify the bit position against the infifo_status register description.
+ */
+static inline bool ahb_dma_ll_rx_is_fifo_full(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
+{
+    return dev->channel[channel].in.infifo_status.val & 0x01;
+}
+
+/**
+ * @brief Check if DMA RX FIFO is empty
+ * @param fifo_level only supports level 1
+ */
+static inline bool ahb_dma_ll_rx_is_fifo_empty(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
+{
+    return dev->channel[channel].in.infifo_status.val & 0x02;
+}
+
+/**
+ * @brief Get number of bytes remained in the L1 RX FIFO
+ * @param fifo_level only supports level 1
+ */
+static inline uint32_t ahb_dma_ll_rx_get_fifo_bytes(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
+{
+    return dev->channel[channel].in.infifo_status.infifo_cnt_chn;
+}
+
+/**
+ * @brief Pop data from DMA RX FIFO
+ *
+ * @note Writes the pop strobe first, then reads the data field of the same register
+ *       (assumes rdata is valid immediately after the strobe — TODO confirm hardware ordering).
+ * @return The word popped from the RX FIFO
+ */
+static inline uint32_t ahb_dma_ll_rx_pop_data(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    dev->channel[channel].in.in_pop.infifo_pop_chn = 1;
+    return dev->channel[channel].in.in_pop.infifo_rdata_chn;
+}
+
+/**
+ * @brief Set the descriptor link base address for RX channel
+ */
+__attribute__((always_inline))
+static inline void ahb_dma_ll_rx_set_desc_addr(ahb_dma_dev_t *dev, uint32_t channel, uint32_t addr)
+{
+    dev->in_link_addr[channel].inlink_addr_chn = addr;
+}
+
+/**
+ * @brief Start dealing with RX descriptors
+ */
+__attribute__((always_inline))
+static inline void ahb_dma_ll_rx_start(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    dev->channel[channel].in.in_link.inlink_start_chn = 1;
+}
+
+/**
+ * @brief Stop dealing with RX descriptors
+ */
+__attribute__((always_inline))
+static inline void ahb_dma_ll_rx_stop(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    dev->channel[channel].in.in_link.inlink_stop_chn = 1;
+}
+
+/**
+ * @brief Restart a new inlink right after the last descriptor
+ */
+__attribute__((always_inline))
+static inline void ahb_dma_ll_rx_restart(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    dev->channel[channel].in.in_link.inlink_restart_chn = 1;
+}
+
+/**
+ * @brief Enable DMA RX to return the address of current descriptor when receives error
+ */
+static inline void ahb_dma_ll_rx_enable_auto_return(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->channel[channel].in.in_link.inlink_auto_ret_chn = enable;
+}
+
+/**
+ * @brief Check if DMA RX FSM is in IDLE state
+ */
+static inline bool ahb_dma_ll_rx_is_fsm_idle(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->channel[channel].in.in_link.inlink_park_chn;
+}
+
+/**
+ * @brief Get RX success EOF descriptor's address
+ */
+__attribute__((always_inline))
+static inline uint32_t ahb_dma_ll_rx_get_success_eof_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->channel[channel].in.in_suc_eof_des_addr.val;
+}
+
+/**
+ * @brief Get RX error EOF descriptor's address
+ */
+__attribute__((always_inline))
+static inline uint32_t ahb_dma_ll_rx_get_error_eof_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->channel[channel].in.in_err_eof_des_addr.val;
+}
+
+/**
+ * @brief Get the pre-fetched RX descriptor's address
+ */
+__attribute__((always_inline))
+static inline uint32_t ahb_dma_ll_rx_get_prefetched_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->channel[channel].in.in_dscr.val;
+}
+
+/**
+ * @brief Set priority for DMA RX channel
+ */
+static inline void ahb_dma_ll_rx_set_priority(ahb_dma_dev_t *dev, uint32_t channel, uint32_t prio)
+{
+    dev->channel[channel].in.in_pri.rx_pri_chn = prio;
+}
+
+/**
+ * @brief Connect DMA RX channel to a given peripheral
+ */
+static inline void ahb_dma_ll_rx_connect_to_periph(ahb_dma_dev_t *dev, uint32_t channel, gdma_trigger_peripheral_t periph, int periph_id)
+{
+    dev->channel[channel].in.in_peri_sel.peri_in_sel_chn = periph_id;
+    dev->channel[channel].in.in_conf0.mem_trans_en_chn = (periph == GDMA_TRIG_PERIPH_M2M);
+}
+
+/**
+ * @brief Disconnect DMA RX channel from peripheral
+ *
+ * @param dev DMA register base address
+ * @param channel RX channel number
+ */
+static inline void ahb_dma_ll_rx_disconnect_from_periph(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    // use the AHB-DMA invalid ID defined in this header (GDMA_LL_INVALID_PERIPH_ID does not exist for this target)
+    dev->channel[channel].in.in_peri_sel.peri_in_sel_chn = AHB_DMA_LL_INVALID_PERIPH_ID;
+    dev->channel[channel].in.in_conf0.mem_trans_en_chn = false;
+}
+
+/**
+ * @brief Whether to enable the ETM subsystem for RX channel
+ *
+ * @note When ETM_EN is 1, only ETM tasks can be used to configure the transfer direction and enable the channel.
+ */
+static inline void ahb_dma_ll_rx_enable_etm_task(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->channel[channel].in.in_conf0.in_etm_en_chn = enable;
+}
+
+///////////////////////////////////// TX /////////////////////////////////////////
+/**
+ * @brief Get DMA TX channel interrupt status word
+ */
+__attribute__((always_inline))
+static inline uint32_t ahb_dma_ll_tx_get_interrupt_status(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->out_intr[channel].st.val;
+}
+
+/**
+ * @brief Enable DMA TX channel interrupt
+ */
+static inline void ahb_dma_ll_tx_enable_interrupt(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
+{
+    if (enable) {
+        dev->out_intr[channel].ena.val |= mask;
+    } else {
+        dev->out_intr[channel].ena.val &= ~mask;
+    }
+}
+
+/**
+ * @brief Clear DMA TX channel interrupt
+ */
+__attribute__((always_inline))
+static inline void ahb_dma_ll_tx_clear_interrupt_status(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mask)
+{
+    dev->out_intr[channel].clr.val = mask;
+}
+
+/**
+ * @brief Get DMA TX channel interrupt status register address
+ */
+static inline volatile void *ahb_dma_ll_tx_get_interrupt_status_reg(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    return (volatile void *)(&dev->out_intr[channel].st);
+}
+
+/**
+ * @brief Enable DMA TX channel to check the owner bit in the descriptor, disabled by default
+ */
+static inline void ahb_dma_ll_tx_enable_owner_check(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->channel[channel].out.out_conf1.out_check_owner_chn = enable;
+}
+
+/**
+ * @brief Enable DMA TX channel burst sending data, disabled by default
+ */
+static inline void ahb_dma_ll_tx_enable_data_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->channel[channel].out.out_conf0.out_data_burst_en_chn = enable;
+}
+
+/**
+ * @brief Enable DMA TX channel burst reading descriptor link, disabled by default
+ */
+static inline void ahb_dma_ll_tx_enable_descriptor_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->channel[channel].out.out_conf0.outdscr_burst_en_chn = enable;
+}
+
+/**
+ * @brief Set TX channel EOF mode
+ */
+static inline void ahb_dma_ll_tx_set_eof_mode(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mode)
+{
+    dev->channel[channel].out.out_conf0.out_eof_mode_chn = mode;
+}
+
+/**
+ * @brief Enable DMA TX channel automatic write results back to descriptor after all data has been sent out, disabled by default
+ */
+static inline void ahb_dma_ll_tx_enable_auto_write_back(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->channel[channel].out.out_conf0.out_auto_wrback_chn = enable;
+}
+
+/**
+ * @brief Reset DMA TX channel FSM and FIFO pointer
+ */
+__attribute__((always_inline))
+static inline void ahb_dma_ll_tx_reset_channel(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    dev->channel[channel].out.out_conf0.out_rst_chn = 1;
+    dev->channel[channel].out.out_conf0.out_rst_chn = 0;
+}
+
+/**
+ * @brief Check if DMA TX FIFO is full
+ * @param fifo_level only supports level 1
+ */
+static inline bool ahb_dma_ll_tx_is_fifo_full(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
+{
+    return dev->channel[channel].out.outfifo_status.val & 0x01;
+}
+
+/**
+ * @brief Check if DMA TX FIFO is empty
+ * @param fifo_level only supports level 1
+ */
+static inline bool ahb_dma_ll_tx_is_fifo_empty(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
+{
+    return dev->channel[channel].out.outfifo_status.val & 0x02;
+}
+
+/**
+ * @brief Get number of bytes in TX FIFO
+ * @param fifo_level only supports level 1
+ */
+static inline uint32_t ahb_dma_ll_tx_get_fifo_bytes(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
+{
+    return dev->channel[channel].out.outfifo_status.outfifo_cnt_chn;
+}
+
+/**
+ * @brief Push data into DMA TX FIFO
+ */
+static inline void ahb_dma_ll_tx_push_data(ahb_dma_dev_t *dev, uint32_t channel, uint32_t data)
+{
+    dev->channel[channel].out.out_push.outfifo_wdata_chn = data;
+    dev->channel[channel].out.out_push.outfifo_push_chn = 1;
+}
+
+/**
+ * @brief Set the descriptor link base address for TX channel
+ */
+__attribute__((always_inline))
+static inline void ahb_dma_ll_tx_set_desc_addr(ahb_dma_dev_t *dev, uint32_t channel, uint32_t addr)
+{
+    dev->out_link_addr[channel].outlink_addr_chn = addr;
+}
+
+/**
+ * @brief Start dealing with TX descriptors
+ */
+__attribute__((always_inline))
+static inline void ahb_dma_ll_tx_start(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    dev->channel[channel].out.out_link.outlink_start_chn = 1;
+}
+
+/**
+ * @brief Stop dealing with TX descriptors
+ */
+__attribute__((always_inline))
+static inline void ahb_dma_ll_tx_stop(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    dev->channel[channel].out.out_link.outlink_stop_chn = 1;
+}
+
+/**
+ * @brief Restart a new outlink right after the last descriptor
+ */
+__attribute__((always_inline))
+static inline void ahb_dma_ll_tx_restart(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    dev->channel[channel].out.out_link.outlink_restart_chn = 1;
+}
+
+/**
+ * @brief Check if DMA TX FSM is in IDLE state
+ */
+static inline bool ahb_dma_ll_tx_is_fsm_idle(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->channel[channel].out.out_link.outlink_park_chn;
+}
+
+/**
+ * @brief Get TX EOF descriptor's address
+ */
+__attribute__((always_inline))
+static inline uint32_t ahb_dma_ll_tx_get_eof_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->channel[channel].out.out_eof_des_addr.val;
+}
+
+/**
+ * @brief Get the pre-fetched TX descriptor's address
+ */
+__attribute__((always_inline))
+static inline uint32_t ahb_dma_ll_tx_get_prefetched_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->channel[channel].out.out_dscr.val;
+}
+
+/**
+ * @brief Set priority for DMA TX channel
+ */
+static inline void ahb_dma_ll_tx_set_priority(ahb_dma_dev_t *dev, uint32_t channel, uint32_t prio)
+{
+    dev->channel[channel].out.out_pri.tx_pri_chn = prio;
+}
+
+/**
+ * @brief Connect DMA TX channel to a given peripheral
+ */
+static inline void ahb_dma_ll_tx_connect_to_periph(ahb_dma_dev_t *dev, uint32_t channel, gdma_trigger_peripheral_t periph, int periph_id)
+{
+    (void)periph;
+    dev->channel[channel].out.out_peri_sel.peri_out_sel_chn = periph_id;
+}
+
+/**
+ * @brief Disconnect DMA TX channel from peripheral
+ *
+ * @param dev DMA register base address
+ * @param channel TX channel number
+ */
+static inline void ahb_dma_ll_tx_disconnect_from_periph(ahb_dma_dev_t *dev, uint32_t channel)
+{
+    // use the AHB-DMA invalid ID defined in this header (GDMA_LL_INVALID_PERIPH_ID does not exist for this target)
+    dev->channel[channel].out.out_peri_sel.peri_out_sel_chn = AHB_DMA_LL_INVALID_PERIPH_ID;
+}
+
+/**
+ * @brief Whether to enable the ETM subsystem for TX channel
+ *
+ * @note When ETM_EN is 1, only ETM tasks can be used to configure the transfer direction and enable the channel.
+ */
+static inline void ahb_dma_ll_tx_enable_etm_task(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->channel[channel].out.out_conf0.out_etm_en_chn = enable;
+}
+
+#ifdef __cplusplus
+}
+#endif

+ 453 - 0
components/hal/esp32p4/include/axi_dma_ll.h

@@ -0,0 +1,453 @@
+/*
+ * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include "hal/gdma_types.h"
+#include "hal/gdma_ll.h"
+#include "soc/axi_dma_struct.h"
+#include "soc/axi_dma_reg.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define AXI_DMA_LL_GET_HW(id) (((id) == 0) ? (&AXI_DMA) : NULL)
+
+// any "dummy" peripheral ID can be used for M2M mode
+#define AXI_DMA_LL_M2M_FREE_PERIPH_ID_MASK (0xFFC0)
+#define AXI_DMA_LL_INVALID_PERIPH_ID       (0x3F)
+
+///////////////////////////////////// Common /////////////////////////////////////////
+/**
+ * @brief Force enable register clock
+ */
+static inline void axi_dma_ll_force_enable_reg_clock(axi_dma_dev_t *dev, bool enable)
+{
+    dev->misc_conf.clk_en = enable;
+}
+
+/**
+ * @brief Disable priority arbitration
+ *
+ * @param dev DMA register base address
+ * @param dis True to disable, false to enable
+ */
+static inline void axi_dma_ll_disable_prio_arb(axi_dma_dev_t *dev, bool dis)
+{
+    dev->misc_conf.arb_pri_dis = dis;
+}
+
+/**
+ * @brief Reset DMA FSM (Read and Write)
+ *
+ * @param dev DMA register base address
+ */
+static inline void axi_dma_ll_reset_fsm(axi_dma_dev_t *dev)
+{
+    dev->misc_conf.axim_rst_rd_inter = 1;
+    dev->misc_conf.axim_rst_rd_inter = 0;
+    dev->misc_conf.axim_rst_wr_inter = 1;
+    dev->misc_conf.axim_rst_wr_inter = 0;
+}
+
+///////////////////////////////////// RX /////////////////////////////////////////
+/**
+ * @brief Get DMA RX channel interrupt status word
+ */
+__attribute__((always_inline))
+static inline uint32_t axi_dma_ll_rx_get_interrupt_status(axi_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->in[channel].intr.st.val;
+}
+
+/**
+ * @brief Enable DMA RX channel interrupt
+ */
+static inline void axi_dma_ll_rx_enable_interrupt(axi_dma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
+{
+    if (enable) {
+        dev->in[channel].intr.ena.val |= mask;
+    } else {
+        dev->in[channel].intr.ena.val &= ~mask;
+    }
+}
+
+/**
+ * @brief Clear DMA RX channel interrupt
+ */
+__attribute__((always_inline))
+static inline void axi_dma_ll_rx_clear_interrupt_status(axi_dma_dev_t *dev, uint32_t channel, uint32_t mask)
+{
+    dev->in[channel].intr.clr.val = mask;
+}
+
+/**
+ * @brief Get DMA RX channel interrupt status register address
+ */
+static inline volatile void *axi_dma_ll_rx_get_interrupt_status_reg(axi_dma_dev_t *dev, uint32_t channel)
+{
+    return (volatile void *)(&dev->in[channel].intr.st);
+}
+
+/**
+ * @brief Enable DMA RX channel to check the owner bit in the descriptor, disabled by default
+ */
+static inline void axi_dma_ll_rx_enable_owner_check(axi_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->in[channel].conf.in_conf1.in_check_owner_chn = enable;
+}
+
+/**
+ * @brief Enable DMA RX channel burst reading data, disabled by default
+ */
+static inline void axi_dma_ll_rx_enable_data_burst(axi_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    // TODO: IDF-6504
+}
+
+/**
+ * @brief Enable DMA RX channel burst reading descriptor link, disabled by default
+ */
+static inline void axi_dma_ll_rx_enable_descriptor_burst(axi_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->in[channel].conf.in_conf0.indscr_burst_en_chn = enable;
+}
+
+/**
+ * @brief Reset DMA RX channel FSM and FIFO pointer
+ */
+__attribute__((always_inline))
+static inline void axi_dma_ll_rx_reset_channel(axi_dma_dev_t *dev, uint32_t channel)
+{
+    dev->in[channel].conf.in_conf0.in_rst_chn = 1;
+    dev->in[channel].conf.in_conf0.in_rst_chn = 0;
+}
+
+/**
+ * @brief Pop data from DMA RX FIFO
+ */
+static inline uint32_t axi_dma_ll_rx_pop_data(axi_dma_dev_t *dev, uint32_t channel)
+{
+    dev->in[channel].conf.in_pop.infifo_pop_chn = 1;
+    return dev->in[channel].conf.in_pop.infifo_rdata_chn;
+}
+
+/**
+ * @brief Set the descriptor link base address for RX channel
+ */
+__attribute__((always_inline))
+static inline void axi_dma_ll_rx_set_desc_addr(axi_dma_dev_t *dev, uint32_t channel, uint32_t addr)
+{
+    dev->in[channel].conf.in_link2.inlink_addr_chn = addr;
+}
+
+/**
+ * @brief Start dealing with RX descriptors
+ */
+__attribute__((always_inline))
+static inline void axi_dma_ll_rx_start(axi_dma_dev_t *dev, uint32_t channel)
+{
+    dev->in[channel].conf.in_link1.inlink_start_chn = 1;
+}
+
+/**
+ * @brief Stop dealing with RX descriptors
+ */
+__attribute__((always_inline))
+static inline void axi_dma_ll_rx_stop(axi_dma_dev_t *dev, uint32_t channel)
+{
+    dev->in[channel].conf.in_link1.inlink_stop_chn = 1;
+}
+
+/**
+ * @brief Restart a new inlink right after the last descriptor
+ */
+__attribute__((always_inline))
+static inline void axi_dma_ll_rx_restart(axi_dma_dev_t *dev, uint32_t channel)
+{
+    dev->in[channel].conf.in_link1.inlink_restart_chn = 1;
+}
+
+/**
+ * @brief Enable DMA RX to return the address of current descriptor when receives error
+ */
+static inline void axi_dma_ll_rx_enable_auto_return(axi_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->in[channel].conf.in_link1.inlink_auto_ret_chn = enable;
+}
+
+/**
+ * @brief Check if DMA RX FSM is in IDLE state
+ */
+static inline bool axi_dma_ll_rx_is_fsm_idle(axi_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->in[channel].conf.in_link1.inlink_park_chn;
+}
+
+/**
+ * @brief Get RX success EOF descriptor's address
+ */
+__attribute__((always_inline))
+static inline uint32_t axi_dma_ll_rx_get_success_eof_desc_addr(axi_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->in[channel].conf.in_suc_eof_des_addr.val;
+}
+
+/**
+ * @brief Get RX error EOF descriptor's address
+ */
+__attribute__((always_inline))
+static inline uint32_t axi_dma_ll_rx_get_error_eof_desc_addr(axi_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->in[channel].conf.in_err_eof_des_addr.val;
+}
+
+/**
+ * @brief Get the pre-fetched RX descriptor's address
+ */
+__attribute__((always_inline))
+static inline uint32_t axi_dma_ll_rx_get_prefetched_desc_addr(axi_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->in[channel].conf.in_dscr.val;
+}
+
+/**
+ * @brief Set priority for DMA RX channel
+ */
+static inline void axi_dma_ll_rx_set_priority(axi_dma_dev_t *dev, uint32_t channel, uint32_t prio)
+{
+    dev->in[channel].conf.in_pri.rx_pri_chn = prio;
+}
+
+/**
+ * @brief Connect DMA RX channel to a given peripheral
+ */
+static inline void axi_dma_ll_rx_connect_to_periph(axi_dma_dev_t *dev, uint32_t channel, gdma_trigger_peripheral_t periph, int periph_id)
+{
+    dev->in[channel].conf.in_peri_sel.peri_in_sel_chn = periph_id;
+    dev->in[channel].conf.in_conf0.mem_trans_en_chn = (periph == GDMA_TRIG_PERIPH_M2M);
+}
+
+/**
+ * @brief Disconnect DMA RX channel from peripheral
+ *
+ * @param dev DMA register base address
+ * @param channel RX channel number
+ */
+static inline void axi_dma_ll_rx_disconnect_from_periph(axi_dma_dev_t *dev, uint32_t channel)
+{
+    // use the AXI-DMA invalid ID defined in this header (GDMA_LL_INVALID_PERIPH_ID does not exist for this target)
+    dev->in[channel].conf.in_peri_sel.peri_in_sel_chn = AXI_DMA_LL_INVALID_PERIPH_ID;
+    dev->in[channel].conf.in_conf0.mem_trans_en_chn = false;
+}
+
+/**
+ * @brief Whether to enable the ETM subsystem for RX channel
+ *
+ * @note When ETM_EN is 1, only ETM tasks can be used to configure the transfer direction and enable the channel.
+ */
+static inline void axi_dma_ll_rx_enable_etm_task(axi_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->in[channel].conf.in_conf0.in_etm_en_chn = enable;
+}
+
+///////////////////////////////////// TX /////////////////////////////////////////
+/**
+ * @brief Get DMA TX channel interrupt status word
+ */
+__attribute__((always_inline))
+static inline uint32_t axi_dma_ll_tx_get_interrupt_status(axi_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->out[channel].intr.st.val;
+}
+
+/**
+ * @brief Enable or disable the DMA TX channel interrupts selected by mask
+ *
+ * @param dev DMA register base address
+ * @param channel TX channel number
+ * @param mask Bit mask of the interrupt events to update
+ * @param enable True to set the masked enable bits, false to clear them
+ */
+static inline void axi_dma_ll_tx_enable_interrupt(axi_dma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
+{
+    uint32_t ena_bits = dev->out[channel].intr.ena.val;
+    ena_bits = enable ? (ena_bits | mask) : (ena_bits & ~mask);
+    dev->out[channel].intr.ena.val = ena_bits;
+}
+
+/**
+ * @brief Clear DMA TX channel interrupt
+ */
+__attribute__((always_inline))
+static inline void axi_dma_ll_tx_clear_interrupt_status(axi_dma_dev_t *dev, uint32_t channel, uint32_t mask)
+{
+    dev->out[channel].intr.clr.val = mask;
+}
+
+/**
+ * @brief Get DMA TX channel interrupt status register address
+ */
+static inline volatile void *axi_dma_ll_tx_get_interrupt_status_reg(axi_dma_dev_t *dev, uint32_t channel)
+{
+    return (volatile void *)(&dev->out[channel].intr.st);
+}
+
+/**
+ * @brief Enable DMA TX channel to check the owner bit in the descriptor, disabled by default
+ */
+static inline void axi_dma_ll_tx_enable_owner_check(axi_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->out[channel].conf.out_conf1.out_check_owner_chn = enable;
+}
+
+/**
+ * @brief Enable DMA TX channel burst sending data, disabled by default
+ */
+static inline void axi_dma_ll_tx_enable_data_burst(axi_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    // TODO: IDF-6504
+}
+
+/**
+ * @brief Enable DMA TX channel burst reading descriptor link, disabled by default
+ */
+static inline void axi_dma_ll_tx_enable_descriptor_burst(axi_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->out[channel].conf.out_conf0.outdscr_burst_en_chn = enable;
+}
+
+/**
+ * @brief Set TX channel EOF mode
+ */
+static inline void axi_dma_ll_tx_set_eof_mode(axi_dma_dev_t *dev, uint32_t channel, uint32_t mode)
+{
+    dev->out[channel].conf.out_conf0.out_eof_mode_chn = mode;
+}
+
+/**
+ * @brief Enable DMA TX channel automatic write results back to descriptor after all data has been sent out, disabled by default
+ */
+static inline void axi_dma_ll_tx_enable_auto_write_back(axi_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->out[channel].conf.out_conf0.out_auto_wrback_chn = enable;
+}
+
+/**
+ * @brief Reset DMA TX channel FSM and FIFO pointer
+ */
+__attribute__((always_inline))
+static inline void axi_dma_ll_tx_reset_channel(axi_dma_dev_t *dev, uint32_t channel)
+{
+    dev->out[channel].conf.out_conf0.out_rst_chn = 1;
+    dev->out[channel].conf.out_conf0.out_rst_chn = 0;
+}
+
+/**
+ * @brief Push data into DMA TX FIFO
+ */
+static inline void axi_dma_ll_tx_push_data(axi_dma_dev_t *dev, uint32_t channel, uint32_t data)
+{
+    dev->out[channel].conf.out_push.outfifo_wdata_chn = data;
+    dev->out[channel].conf.out_push.outfifo_push_chn = 1;
+}
+
+/**
+ * @brief Set the descriptor link base address for TX channel
+ */
+__attribute__((always_inline))
+static inline void axi_dma_ll_tx_set_desc_addr(axi_dma_dev_t *dev, uint32_t channel, uint32_t addr)
+{
+    dev->out[channel].conf.out_link2.outlink_addr_chn = addr;
+}
+
+/**
+ * @brief Start dealing with TX descriptors
+ */
+__attribute__((always_inline))
+static inline void axi_dma_ll_tx_start(axi_dma_dev_t *dev, uint32_t channel)
+{
+    dev->out[channel].conf.out_link1.outlink_start_chn = 1;
+}
+
+/**
+ * @brief Stop dealing with TX descriptors
+ */
+__attribute__((always_inline))
+static inline void axi_dma_ll_tx_stop(axi_dma_dev_t *dev, uint32_t channel)
+{
+    dev->out[channel].conf.out_link1.outlink_stop_chn = 1;
+}
+
+/**
+ * @brief Restart a new outlink right after the last descriptor
+ */
+__attribute__((always_inline))
+static inline void axi_dma_ll_tx_restart(axi_dma_dev_t *dev, uint32_t channel)
+{
+    dev->out[channel].conf.out_link1.outlink_restart_chn = 1;
+}
+
+/**
+ * @brief Check if DMA TX FSM is in IDLE state
+ */
+static inline bool axi_dma_ll_tx_is_fsm_idle(axi_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->out[channel].conf.out_link1.outlink_park_chn;
+}
+
+/**
+ * @brief Get TX EOF descriptor's address
+ */
+__attribute__((always_inline))
+static inline uint32_t axi_dma_ll_tx_get_eof_desc_addr(axi_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->out[channel].conf.out_eof_des_addr.val;
+}
+
+/**
+ * @brief Get the pre-fetched TX descriptor's address
+ */
+__attribute__((always_inline))
+static inline uint32_t axi_dma_ll_tx_get_prefetched_desc_addr(axi_dma_dev_t *dev, uint32_t channel)
+{
+    return dev->out[channel].conf.out_dscr.val;
+}
+
+/**
+ * @brief Set priority for DMA TX channel
+ */
+static inline void axi_dma_ll_tx_set_priority(axi_dma_dev_t *dev, uint32_t channel, uint32_t prio)
+{
+    dev->out[channel].conf.out_pri.tx_pri_chn = prio;
+}
+
+/**
+ * @brief Connect DMA TX channel to a given peripheral
+ */
+static inline void axi_dma_ll_tx_connect_to_periph(axi_dma_dev_t *dev, uint32_t channel, gdma_trigger_peripheral_t periph, int periph_id)
+{
+    (void)periph;
+    dev->out[channel].conf.out_peri_sel.peri_out_sel_chn = periph_id;
+}
+
+/**
+ * @brief Disconnect DMA TX channel from peripheral
+ *
+ * @param dev DMA register base address
+ * @param channel TX channel number
+ */
+static inline void axi_dma_ll_tx_disconnect_from_periph(axi_dma_dev_t *dev, uint32_t channel)
+{
+    // use the AXI-DMA invalid ID defined in this header (GDMA_LL_INVALID_PERIPH_ID does not exist for this target)
+    dev->out[channel].conf.out_peri_sel.peri_out_sel_chn = AXI_DMA_LL_INVALID_PERIPH_ID;
+}
+
+/**
+ * @brief Whether to enable the ETM subsystem for TX channel
+ *
+ * @note When ETM_EN is 1, only ETM tasks can be used to configure the transfer direction and enable the channel.
+ */
+static inline void axi_dma_ll_tx_enable_etm_task(axi_dma_dev_t *dev, uint32_t channel, bool enable)
+{
+    dev->out[channel].conf.out_conf0.out_etm_en_chn = enable;
+}
+
+#ifdef __cplusplus
+}
+#endif

+ 43 - 0
components/hal/esp32p4/include/gdma_ll.h

@@ -0,0 +1,43 @@
+/*
+ * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/**
+ * @brief The contents defined in this file are common for both AXI-DMA and AHB-DMA
+ */
+
+#pragma once
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define GDMA_LL_CHANNEL_MAX_PRIORITY 5 // supported priority levels: [0,5]
+
+// Aggregate masks covering every per-channel interrupt event defined below
+// (RX: 5 event bits -> 0x1F; TX: 4 event bits -> 0x0F)
+#define GDMA_LL_RX_EVENT_MASK       (0x1F)
+#define GDMA_LL_TX_EVENT_MASK       (0x0F)
+
+// TX channel interrupt event bits
+#define GDMA_LL_EVENT_TX_TOTAL_EOF  (1<<3)
+#define GDMA_LL_EVENT_TX_DESC_ERROR (1<<2)
+#define GDMA_LL_EVENT_TX_EOF        (1<<1)
+#define GDMA_LL_EVENT_TX_DONE       (1<<0)
+
+// RX channel interrupt event bits
+#define GDMA_LL_EVENT_RX_DESC_EMPTY (1<<4)
+#define GDMA_LL_EVENT_RX_DESC_ERROR (1<<3)
+#define GDMA_LL_EVENT_RX_ERR_EOF    (1<<2)
+#define GDMA_LL_EVENT_RX_SUC_EOF    (1<<1)
+#define GDMA_LL_EVENT_RX_DONE       (1<<0)
+
+// Group ID layout: AHB group(s) come first, AXI group(s) follow
+#define GDMA_LL_AHB_GROUP_START_ID    0 // AHB GDMA group ID starts from 0
+#define GDMA_LL_AHB_NUM_GROUPS        1 // Number of AHB GDMA groups
+#define GDMA_LL_AHB_PAIRS_PER_GROUP   3 // Number of GDMA pairs in each AHB group
+
+#define GDMA_LL_AXI_GROUP_START_ID    1 // AXI GDMA group ID starts from 1
+#define GDMA_LL_AXI_NUM_GROUPS        1 // Number of AXI GDMA groups
+#define GDMA_LL_AXI_PAIRS_PER_GROUP   3 // Number of GDMA pairs in each AXI group
+
+#ifdef __cplusplus
+}
+#endif

+ 52 - 15
components/hal/esp32s3/include/hal/gdma_ll.h

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -8,6 +8,7 @@
 #include <stddef.h> /* For NULL declaration */
 #include <stdint.h>
 #include <stdbool.h>
+#include "hal/assert.h"
 #include "hal/gdma_types.h"
 #include "soc/gdma_struct.h"
 #include "soc/gdma_reg.h"
@@ -47,18 +48,22 @@ extern "C" {
 #define GDMA_LL_EVENT_RX_SUC_EOF     (1<<1)
 #define GDMA_LL_EVENT_RX_DONE        (1<<0)
 
-#define GDMA_LL_L2FIFO_BASE_SIZE (16) // Basic size of GDMA Level 2 FIFO
+#define GDMA_LL_L2FIFO_BASE_SIZE     16 // Basic size of GDMA Level 2 FIFO
 
 /* Memory block size value supported by channel */
-#define GDMA_LL_EXT_MEM_BK_SIZE_16B (0)
-#define GDMA_LL_EXT_MEM_BK_SIZE_32B (1)
-#define GDMA_LL_EXT_MEM_BK_SIZE_64B (2)
+#define GDMA_LL_EXT_MEM_BK_SIZE_16B   0
+#define GDMA_LL_EXT_MEM_BK_SIZE_32B   1
+#define GDMA_LL_EXT_MEM_BK_SIZE_64B   2
+
+#define GDMA_LL_AHB_GROUP_START_ID    0 // AHB GDMA group ID starts from 0
+#define GDMA_LL_AHB_NUM_GROUPS        1 // Number of AHB GDMA groups
+#define GDMA_LL_AHB_PAIRS_PER_GROUP   5 // Number of GDMA pairs in each AHB group
 
 ///////////////////////////////////// Common /////////////////////////////////////////
 /**
- * @brief Enable DMA clock gating
+ * @brief Force enable register clock
  */
-static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
+static inline void gdma_ll_force_enable_reg_clock(gdma_dev_t *dev, bool enable)
 {
     dev->misc_conf.clk_en = enable;
 }
@@ -137,12 +142,28 @@ static inline void gdma_ll_rx_reset_channel(gdma_dev_t *dev, uint32_t channel)
 }
 
 /**
- * @brief Set DMA RX channel memory block size
- * @param size_index Supported value: GDMA_LL_EXT_MEM_BK_SIZE_16B/32B/64B
+ * @brief Set DMA RX channel memory block size based on the alignment requirement
+ * @param align Supported value: 16/32/64
  */
-static inline void gdma_ll_rx_set_block_size_psram(gdma_dev_t *dev, uint32_t channel, uint32_t size_index)
+static inline void gdma_ll_rx_set_ext_mem_block_size(gdma_dev_t *dev, uint32_t channel, uint8_t align)
 {
-    dev->channel[channel].in.conf1.in_ext_mem_bk_size = size_index;
+    uint32_t block_size = 0;
+    switch (align) {
+    case 64: // 64 Bytes alignment
+        block_size = GDMA_LL_EXT_MEM_BK_SIZE_64B;
+        break;
+    case 32: // 32 Bytes alignment
+        block_size = GDMA_LL_EXT_MEM_BK_SIZE_32B;
+        break;
+    case 16: // 16 Bytes alignment
+        block_size = GDMA_LL_EXT_MEM_BK_SIZE_16B;
+        break;
+    default:
+        HAL_ASSERT(false);
+        break;
+    }
+
+    dev->channel[channel].in.conf1.in_ext_mem_bk_size = block_size;
 }
 
 /**
@@ -401,12 +422,28 @@ static inline void gdma_ll_tx_reset_channel(gdma_dev_t *dev, uint32_t channel)
 }
 
 /**
- * @brief Set DMA TX channel memory block size
- * @param size_index Supported value: GDMA_LL_EXT_MEM_BK_SIZE_16B/32B/64B
+ * @brief Set DMA TX channel memory block size based on the alignment requirement
+ * @param align Supported value: 16/32/64
  */
-static inline void gdma_ll_tx_set_block_size_psram(gdma_dev_t *dev, uint32_t channel, uint32_t size_index)
+static inline void gdma_ll_tx_set_ext_mem_block_size(gdma_dev_t *dev, uint32_t channel, uint8_t align)
 {
-    dev->channel[channel].out.conf1.out_ext_mem_bk_size = size_index;
+    uint32_t block_size = 0;
+    switch (align) {
+    case 64: // 64 Bytes alignment
+        block_size = GDMA_LL_EXT_MEM_BK_SIZE_64B;
+        break;
+    case 32: // 32 Bytes alignment
+        block_size = GDMA_LL_EXT_MEM_BK_SIZE_32B;
+        break;
+    case 16: // 16 Bytes alignment
+        block_size = GDMA_LL_EXT_MEM_BK_SIZE_16B;
+        break;
+    default:
+        HAL_ASSERT(false);
+        break;
+    }
+
+    dev->channel[channel].out.conf1.out_ext_mem_bk_size = block_size;
 }
 
 /**

+ 0 - 13
components/hal/gdma_hal.c

@@ -1,13 +0,0 @@
-/*
- * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-#include "hal/gdma_hal.h"
-#include "hal/gdma_ll.h"
-
-void gdma_hal_init(gdma_hal_context_t *hal, int group_id)
-{
-    hal->dev = GDMA_LL_GET_HW(group_id);
-}

+ 181 - 0
components/hal/gdma_hal_ahb_v1.c

@@ -0,0 +1,181 @@
+/*
+ * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// AHB-GDMA HAL implementation, register layout v1 (uses the legacy gdma_ll_* API
+// and the gdma_dev_t register struct). Presumably selected per target by
+// SOC_AHB_GDMA_VERSION == 1 — confirm in the component build scripts.
+// Each function is a thin dispatcher: it forwards to the RX or TX flavor of the
+// corresponding LL routine based on the channel direction.
+
+#include "soc/soc_caps.h"
+#include "hal/assert.h"
+#include "hal/gdma_hal_ahb.h"
+#include "hal/gdma_ll.h"
+
+// HAL private data: bitmap of peripheral IDs that memory-to-memory transfers may claim
+static gdma_hal_priv_data_t gdma_ahb_hal_priv_data = {
+    .m2m_free_periph_mask = GDMA_LL_M2M_FREE_PERIPH_ID_MASK,
+};
+
+// Load the head of the descriptor list into the channel and start it
+void gdma_ahb_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        gdma_ll_rx_set_desc_addr(hal->dev, chan_id, desc_base_addr);
+        gdma_ll_rx_start(hal->dev, chan_id);
+    } else {
+        gdma_ll_tx_set_desc_addr(hal->dev, chan_id, desc_base_addr);
+        gdma_ll_tx_start(hal->dev, chan_id);
+    }
+}
+
+// Stop the channel in the given direction
+void gdma_ahb_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        gdma_ll_rx_stop(hal->dev, chan_id);
+    } else {
+        gdma_ll_tx_stop(hal->dev, chan_id);
+    }
+}
+
+// "Append": restart the engine so it re-scans the link list and picks up newly appended descriptors
+void gdma_ahb_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        gdma_ll_rx_restart(hal->dev, chan_id);
+    } else {
+        gdma_ll_tx_restart(hal->dev, chan_id);
+    }
+}
+
+// Reset the channel FIFO and state machine
+void gdma_ahb_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        gdma_ll_rx_reset_channel(hal->dev, chan_id);
+    } else {
+        gdma_ll_tx_reset_channel(hal->dev, chan_id);
+    }
+}
+
+// Set the channel's arbitration priority
+void gdma_ahb_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        gdma_ll_rx_set_priority(hal->dev, chan_id, priority);
+    } else {
+        gdma_ll_tx_set_priority(hal->dev, chan_id, priority);
+    }
+}
+
+// Route the channel to a peripheral trigger; the channel is reset first so no stale state survives
+void gdma_ahb_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        gdma_ll_rx_reset_channel(hal->dev, chan_id); // reset channel
+        gdma_ll_rx_connect_to_periph(hal->dev, chan_id, periph, periph_sub_id);
+    } else {
+        gdma_ll_tx_reset_channel(hal->dev, chan_id); // reset channel
+        gdma_ll_tx_connect_to_periph(hal->dev, chan_id, periph, periph_sub_id);
+    }
+}
+
+// Detach the channel from its peripheral trigger
+void gdma_ahb_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        gdma_ll_rx_disconnect_from_periph(hal->dev, chan_id);
+    } else {
+        gdma_ll_tx_disconnect_from_periph(hal->dev, chan_id);
+    }
+}
+
+// Configure burst mode for data transfers and for descriptor fetches independently
+void gdma_ahb_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        gdma_ll_rx_enable_data_burst(hal->dev, chan_id, en_data_burst);
+        gdma_ll_rx_enable_descriptor_burst(hal->dev, chan_id, en_desc_burst);
+    } else {
+        gdma_ll_tx_enable_data_burst(hal->dev, chan_id, en_data_burst);
+        gdma_ll_tx_enable_descriptor_burst(hal->dev, chan_id, en_desc_burst);
+    }
+}
+
+#if SOC_AHB_GDMA_SUPPORT_PSRAM
+// Set the external (PSRAM) memory block-size/alignment; only compiled when the AHB DMA can reach PSRAM
+void gdma_ahb_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        gdma_ll_rx_set_ext_mem_block_size(hal->dev, chan_id, align);
+    } else {
+        gdma_ll_tx_set_ext_mem_block_size(hal->dev, chan_id, align);
+    }
+}
+#endif
+
+// Misc channel strategy; note: en_desc_write_back is only applied on the TX path
+// (no RX auto-write-back LL call is used here)
+void gdma_ahb_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        gdma_ll_rx_enable_owner_check(hal->dev, chan_id, en_owner_check);
+    } else {
+        gdma_ll_tx_enable_owner_check(hal->dev, chan_id, en_owner_check);
+        gdma_ll_tx_enable_auto_write_back(hal->dev, chan_id, en_desc_write_back);
+    }
+}
+
+// Enable/disable the interrupt events selected by intr_event_mask
+void gdma_ahb_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        gdma_ll_rx_enable_interrupt(hal->dev, chan_id, intr_event_mask, en_or_dis);
+    } else {
+        gdma_ll_tx_enable_interrupt(hal->dev, chan_id, intr_event_mask, en_or_dis);
+    }
+}
+
+// Clear pending interrupt status bits selected by intr_event_mask
+void gdma_ahb_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        gdma_ll_rx_clear_interrupt_status(hal->dev, chan_id, intr_event_mask);
+    } else {
+        gdma_ll_tx_clear_interrupt_status(hal->dev, chan_id, intr_event_mask);
+    }
+}
+
+// Read the raw interrupt status bitmap for the channel
+uint32_t gdma_ahb_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        return gdma_ll_rx_get_interrupt_status(hal->dev, chan_id);
+    } else {
+        return gdma_ll_tx_get_interrupt_status(hal->dev, chan_id);
+    }
+}
+
+// Return the interrupt status register *address* (cast to uint32_t), e.g. for fast ISR access
+uint32_t gdma_ahb_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        return (uint32_t)gdma_ll_rx_get_interrupt_status_reg(hal->dev, chan_id);
+    } else {
+        return (uint32_t)gdma_ll_tx_get_interrupt_status_reg(hal->dev, chan_id);
+    }
+}
+
+// Address of the descriptor that raised the latest EOF (RX: success EOF only)
+uint32_t gdma_ahb_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        return gdma_ll_rx_get_success_eof_desc_addr(hal->dev, chan_id);
+    } else {
+        return gdma_ll_tx_get_eof_desc_addr(hal->dev, chan_id);
+    }
+}
+
+// Bind the register base and fill the function table so the gdma_hal_* front-ends
+// (gdma_hal_top.c) can dispatch without knowing which DMA flavor is underneath.
+// group_id is global; subtract the AHB start ID to get the hardware instance index.
+void gdma_ahb_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config)
+{
+    hal->dev = GDMA_LL_GET_HW(config->group_id - GDMA_LL_AHB_GROUP_START_ID);
+    hal->start_with_desc = gdma_ahb_hal_start_with_desc;
+    hal->stop = gdma_ahb_hal_stop;
+    hal->append = gdma_ahb_hal_append;
+    hal->reset = gdma_ahb_hal_reset;
+    hal->set_priority = gdma_ahb_hal_set_priority;
+    hal->connect_peri = gdma_ahb_hal_connect_peri;
+    hal->disconnect_peri = gdma_ahb_hal_disconnect_peri;
+    hal->enable_burst = gdma_ahb_hal_enable_burst;
+    hal->set_strategy = gdma_ahb_hal_set_strategy;
+    hal->enable_intr = gdma_ahb_hal_enable_intr;
+    hal->clear_intr = gdma_ahb_hal_clear_intr;
+    hal->read_intr_status = gdma_ahb_hal_read_intr_status;
+    hal->get_intr_status_reg = gdma_ahb_hal_get_intr_status_reg;
+    hal->get_eof_desc_addr = gdma_ahb_hal_get_eof_desc_addr;
+#if SOC_AHB_GDMA_SUPPORT_PSRAM
+    hal->set_ext_mem_align = gdma_ahb_hal_set_ext_mem_align;
+#endif // SOC_AHB_GDMA_SUPPORT_PSRAM
+    hal->priv_data = &gdma_ahb_hal_priv_data;
+}

+ 167 - 0
components/hal/gdma_hal_ahb_v2.c

@@ -0,0 +1,167 @@
+/*
+ * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// AHB-GDMA HAL implementation, register layout v2 (uses the ahb_dma_ll_* API and
+// the ahb_dma_dev_t register struct). Defines the same external symbols as
+// gdma_hal_ahb_v1.c — presumably only one of the two is compiled per target,
+// keyed on SOC_AHB_GDMA_VERSION; confirm in the component build scripts.
+
+#include "soc/soc_caps.h"
+#include "hal/assert.h"
+#include "hal/gdma_hal_ahb.h"
+#include "hal/ahb_dma_ll.h"
+
+// HAL private data: bitmap of peripheral IDs that memory-to-memory transfers may claim
+static gdma_hal_priv_data_t gdma_ahb_hal_priv_data = {
+    .m2m_free_periph_mask = AHB_DMA_LL_M2M_FREE_PERIPH_ID_MASK,
+};
+
+// Load the head of the descriptor list into the channel and start it
+void gdma_ahb_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        ahb_dma_ll_rx_set_desc_addr(hal->ahb_dma_dev, chan_id, desc_base_addr);
+        ahb_dma_ll_rx_start(hal->ahb_dma_dev, chan_id);
+    } else {
+        ahb_dma_ll_tx_set_desc_addr(hal->ahb_dma_dev, chan_id, desc_base_addr);
+        ahb_dma_ll_tx_start(hal->ahb_dma_dev, chan_id);
+    }
+}
+
+// Stop the channel in the given direction
+void gdma_ahb_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        ahb_dma_ll_rx_stop(hal->ahb_dma_dev, chan_id);
+    } else {
+        ahb_dma_ll_tx_stop(hal->ahb_dma_dev, chan_id);
+    }
+}
+
+// "Append": restart the engine so it re-scans the link list and picks up newly appended descriptors
+void gdma_ahb_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        ahb_dma_ll_rx_restart(hal->ahb_dma_dev, chan_id);
+    } else {
+        ahb_dma_ll_tx_restart(hal->ahb_dma_dev, chan_id);
+    }
+}
+
+// Reset the channel FIFO and state machine
+void gdma_ahb_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        ahb_dma_ll_rx_reset_channel(hal->ahb_dma_dev, chan_id);
+    } else {
+        ahb_dma_ll_tx_reset_channel(hal->ahb_dma_dev, chan_id);
+    }
+}
+
+// Set the channel's arbitration priority
+void gdma_ahb_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        ahb_dma_ll_rx_set_priority(hal->ahb_dma_dev, chan_id, priority);
+    } else {
+        ahb_dma_ll_tx_set_priority(hal->ahb_dma_dev, chan_id, priority);
+    }
+}
+
+// Route the channel to a peripheral trigger; the channel is reset first so no stale state survives
+void gdma_ahb_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        ahb_dma_ll_rx_reset_channel(hal->ahb_dma_dev, chan_id); // reset channel
+        ahb_dma_ll_rx_connect_to_periph(hal->ahb_dma_dev, chan_id, periph, periph_sub_id);
+    } else {
+        ahb_dma_ll_tx_reset_channel(hal->ahb_dma_dev, chan_id); // reset channel
+        ahb_dma_ll_tx_connect_to_periph(hal->ahb_dma_dev, chan_id, periph, periph_sub_id);
+    }
+}
+
+// Detach the channel from its peripheral trigger
+void gdma_ahb_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        ahb_dma_ll_rx_disconnect_from_periph(hal->ahb_dma_dev, chan_id);
+    } else {
+        ahb_dma_ll_tx_disconnect_from_periph(hal->ahb_dma_dev, chan_id);
+    }
+}
+
+// Configure burst mode for data transfers and for descriptor fetches independently
+void gdma_ahb_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        ahb_dma_ll_rx_enable_data_burst(hal->ahb_dma_dev, chan_id, en_data_burst);
+        ahb_dma_ll_rx_enable_descriptor_burst(hal->ahb_dma_dev, chan_id, en_desc_burst);
+    } else {
+        ahb_dma_ll_tx_enable_data_burst(hal->ahb_dma_dev, chan_id, en_data_burst);
+        ahb_dma_ll_tx_enable_descriptor_burst(hal->ahb_dma_dev, chan_id, en_desc_burst);
+    }
+}
+
+// Misc channel strategy; note: en_desc_write_back is only applied on the TX path
+// (no RX auto-write-back LL call is used here)
+void gdma_ahb_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        ahb_dma_ll_rx_enable_owner_check(hal->ahb_dma_dev, chan_id, en_owner_check);
+    } else {
+        ahb_dma_ll_tx_enable_owner_check(hal->ahb_dma_dev, chan_id, en_owner_check);
+        ahb_dma_ll_tx_enable_auto_write_back(hal->ahb_dma_dev, chan_id, en_desc_write_back);
+    }
+}
+
+// Enable/disable the interrupt events selected by intr_event_mask
+void gdma_ahb_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        ahb_dma_ll_rx_enable_interrupt(hal->ahb_dma_dev, chan_id, intr_event_mask, en_or_dis);
+    } else {
+        ahb_dma_ll_tx_enable_interrupt(hal->ahb_dma_dev, chan_id, intr_event_mask, en_or_dis);
+    }
+}
+
+// Clear pending interrupt status bits selected by intr_event_mask
+void gdma_ahb_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        ahb_dma_ll_rx_clear_interrupt_status(hal->ahb_dma_dev, chan_id, intr_event_mask);
+    } else {
+        ahb_dma_ll_tx_clear_interrupt_status(hal->ahb_dma_dev, chan_id, intr_event_mask);
+    }
+}
+
+// Read the raw interrupt status bitmap for the channel
+uint32_t gdma_ahb_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        return ahb_dma_ll_rx_get_interrupt_status(hal->ahb_dma_dev, chan_id);
+    } else {
+        return ahb_dma_ll_tx_get_interrupt_status(hal->ahb_dma_dev, chan_id);
+    }
+}
+
+// Return the interrupt status register *address* (cast to uint32_t), e.g. for fast ISR access
+uint32_t gdma_ahb_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        return (uint32_t)ahb_dma_ll_rx_get_interrupt_status_reg(hal->ahb_dma_dev, chan_id);
+    } else {
+        return (uint32_t)ahb_dma_ll_tx_get_interrupt_status_reg(hal->ahb_dma_dev, chan_id);
+    }
+}
+
+// Address of the descriptor that raised the latest EOF (RX: success EOF only)
+uint32_t gdma_ahb_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        return ahb_dma_ll_rx_get_success_eof_desc_addr(hal->ahb_dma_dev, chan_id);
+    } else {
+        return ahb_dma_ll_tx_get_eof_desc_addr(hal->ahb_dma_dev, chan_id);
+    }
+}
+
+// Bind the register base and fill the function table so the gdma_hal_* front-ends
+// (gdma_hal_top.c) can dispatch without knowing which DMA flavor is underneath.
+// group_id is global; subtract the AHB start ID to get the hardware instance index.
+void gdma_ahb_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config)
+{
+    hal->ahb_dma_dev = AHB_DMA_LL_GET_HW(config->group_id - GDMA_LL_AHB_GROUP_START_ID);
+    hal->start_with_desc = gdma_ahb_hal_start_with_desc;
+    hal->stop = gdma_ahb_hal_stop;
+    hal->append = gdma_ahb_hal_append;
+    hal->reset = gdma_ahb_hal_reset;
+    hal->set_priority = gdma_ahb_hal_set_priority;
+    hal->connect_peri = gdma_ahb_hal_connect_peri;
+    hal->disconnect_peri = gdma_ahb_hal_disconnect_peri;
+    hal->enable_burst = gdma_ahb_hal_enable_burst;
+    hal->set_strategy = gdma_ahb_hal_set_strategy;
+    hal->enable_intr = gdma_ahb_hal_enable_intr;
+    hal->clear_intr = gdma_ahb_hal_clear_intr;
+    hal->read_intr_status = gdma_ahb_hal_read_intr_status;
+    hal->get_intr_status_reg = gdma_ahb_hal_get_intr_status_reg;
+    hal->get_eof_desc_addr = gdma_ahb_hal_get_eof_desc_addr;
+    hal->priv_data = &gdma_ahb_hal_priv_data;
+}

+ 167 - 0
components/hal/gdma_hal_axi.c

@@ -0,0 +1,167 @@
+/*
+ * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// AXI-GDMA HAL implementation (uses the axi_dma_ll_* API and the axi_dma_dev_t
+// register struct). Mirrors the AHB variants: every function is a thin dispatcher
+// that forwards to the RX or TX LL routine based on the channel direction.
+
+#include "soc/soc_caps.h"
+#include "hal/assert.h"
+#include "hal/gdma_hal_axi.h"
+#include "hal/axi_dma_ll.h"
+
+// HAL private data: bitmap of peripheral IDs that memory-to-memory transfers may claim
+static gdma_hal_priv_data_t gdma_axi_hal_priv_data = {
+    .m2m_free_periph_mask = AXI_DMA_LL_M2M_FREE_PERIPH_ID_MASK,
+};
+
+// Load the head of the descriptor list into the channel and start it
+void gdma_axi_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        axi_dma_ll_rx_set_desc_addr(hal->axi_dma_dev, chan_id, desc_base_addr);
+        axi_dma_ll_rx_start(hal->axi_dma_dev, chan_id);
+    } else {
+        axi_dma_ll_tx_set_desc_addr(hal->axi_dma_dev, chan_id, desc_base_addr);
+        axi_dma_ll_tx_start(hal->axi_dma_dev, chan_id);
+    }
+}
+
+// Stop the channel in the given direction
+void gdma_axi_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        axi_dma_ll_rx_stop(hal->axi_dma_dev, chan_id);
+    } else {
+        axi_dma_ll_tx_stop(hal->axi_dma_dev, chan_id);
+    }
+}
+
+// "Append": restart the engine so it re-scans the link list and picks up newly appended descriptors
+void gdma_axi_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        axi_dma_ll_rx_restart(hal->axi_dma_dev, chan_id);
+    } else {
+        axi_dma_ll_tx_restart(hal->axi_dma_dev, chan_id);
+    }
+}
+
+// Reset the channel FIFO and state machine
+void gdma_axi_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        axi_dma_ll_rx_reset_channel(hal->axi_dma_dev, chan_id);
+    } else {
+        axi_dma_ll_tx_reset_channel(hal->axi_dma_dev, chan_id);
+    }
+}
+
+// Set the channel's arbitration priority
+void gdma_axi_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        axi_dma_ll_rx_set_priority(hal->axi_dma_dev, chan_id, priority);
+    } else {
+        axi_dma_ll_tx_set_priority(hal->axi_dma_dev, chan_id, priority);
+    }
+}
+
+// Route the channel to a peripheral trigger; the channel is reset first so no stale state survives
+void gdma_axi_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        axi_dma_ll_rx_reset_channel(hal->axi_dma_dev, chan_id); // reset channel
+        axi_dma_ll_rx_connect_to_periph(hal->axi_dma_dev, chan_id, periph, periph_sub_id);
+    } else {
+        axi_dma_ll_tx_reset_channel(hal->axi_dma_dev, chan_id); // reset channel
+        axi_dma_ll_tx_connect_to_periph(hal->axi_dma_dev, chan_id, periph, periph_sub_id);
+    }
+}
+
+// Detach the channel from its peripheral trigger
+void gdma_axi_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        axi_dma_ll_rx_disconnect_from_periph(hal->axi_dma_dev, chan_id);
+    } else {
+        axi_dma_ll_tx_disconnect_from_periph(hal->axi_dma_dev, chan_id);
+    }
+}
+
+// Configure burst mode for data transfers and for descriptor fetches independently
+void gdma_axi_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        axi_dma_ll_rx_enable_data_burst(hal->axi_dma_dev, chan_id, en_data_burst);
+        axi_dma_ll_rx_enable_descriptor_burst(hal->axi_dma_dev, chan_id, en_desc_burst);
+    } else {
+        axi_dma_ll_tx_enable_data_burst(hal->axi_dma_dev, chan_id, en_data_burst);
+        axi_dma_ll_tx_enable_descriptor_burst(hal->axi_dma_dev, chan_id, en_desc_burst);
+    }
+}
+
+// Misc channel strategy; note: en_desc_write_back is only applied on the TX path
+// (no RX auto-write-back LL call is used here)
+void gdma_axi_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        axi_dma_ll_rx_enable_owner_check(hal->axi_dma_dev, chan_id, en_owner_check);
+    } else {
+        axi_dma_ll_tx_enable_owner_check(hal->axi_dma_dev, chan_id, en_owner_check);
+        axi_dma_ll_tx_enable_auto_write_back(hal->axi_dma_dev, chan_id, en_desc_write_back);
+    }
+}
+
+// Enable/disable the interrupt events selected by intr_event_mask
+void gdma_axi_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        axi_dma_ll_rx_enable_interrupt(hal->axi_dma_dev, chan_id, intr_event_mask, en_or_dis);
+    } else {
+        axi_dma_ll_tx_enable_interrupt(hal->axi_dma_dev, chan_id, intr_event_mask, en_or_dis);
+    }
+}
+
+// Clear pending interrupt status bits selected by intr_event_mask
+void gdma_axi_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        axi_dma_ll_rx_clear_interrupt_status(hal->axi_dma_dev, chan_id, intr_event_mask);
+    } else {
+        axi_dma_ll_tx_clear_interrupt_status(hal->axi_dma_dev, chan_id, intr_event_mask);
+    }
+}
+
+// Read the raw interrupt status bitmap for the channel
+uint32_t gdma_axi_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        return axi_dma_ll_rx_get_interrupt_status(hal->axi_dma_dev, chan_id);
+    } else {
+        return axi_dma_ll_tx_get_interrupt_status(hal->axi_dma_dev, chan_id);
+    }
+}
+
+// Return the interrupt status register *address* (cast to uint32_t), e.g. for fast ISR access
+uint32_t gdma_axi_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        return (uint32_t)axi_dma_ll_rx_get_interrupt_status_reg(hal->axi_dma_dev, chan_id);
+    } else {
+        return (uint32_t)axi_dma_ll_tx_get_interrupt_status_reg(hal->axi_dma_dev, chan_id);
+    }
+}
+
+// Address of the descriptor that raised the latest EOF (RX: success EOF only)
+uint32_t gdma_axi_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
+        return axi_dma_ll_rx_get_success_eof_desc_addr(hal->axi_dma_dev, chan_id);
+    } else {
+        return axi_dma_ll_tx_get_eof_desc_addr(hal->axi_dma_dev, chan_id);
+    }
+}
+
+// Bind the register base and fill the function table so the gdma_hal_* front-ends
+// (gdma_hal_top.c) can dispatch without knowing which DMA flavor is underneath.
+// group_id is global; subtract the AXI start ID to get the hardware instance index.
+void gdma_axi_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config)
+{
+    hal->axi_dma_dev = AXI_DMA_LL_GET_HW(config->group_id - GDMA_LL_AXI_GROUP_START_ID);
+    hal->start_with_desc = gdma_axi_hal_start_with_desc;
+    hal->stop = gdma_axi_hal_stop;
+    hal->append = gdma_axi_hal_append;
+    hal->reset = gdma_axi_hal_reset;
+    hal->set_priority = gdma_axi_hal_set_priority;
+    hal->connect_peri = gdma_axi_hal_connect_peri;
+    hal->disconnect_peri = gdma_axi_hal_disconnect_peri;
+    hal->enable_burst = gdma_axi_hal_enable_burst;
+    hal->set_strategy = gdma_axi_hal_set_strategy;
+    hal->enable_intr = gdma_axi_hal_enable_intr;
+    hal->clear_intr = gdma_axi_hal_clear_intr;
+    hal->read_intr_status = gdma_axi_hal_read_intr_status;
+    hal->get_intr_status_reg = gdma_axi_hal_get_intr_status_reg;
+    hal->get_eof_desc_addr = gdma_axi_hal_get_eof_desc_addr;
+    hal->priv_data = &gdma_axi_hal_priv_data;
+}

+ 91 - 0
components/hal/gdma_hal_top.c

@@ -0,0 +1,91 @@
+/*
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// GDMA HAL front-end: target-agnostic wrappers that dispatch through the
+// function table installed by gdma_ahb_hal_init()/gdma_axi_hal_init(), so the
+// driver layer never needs to know which DMA flavor (AHB v1/v2, AXI) it drives.
+
+#include <stdlib.h>
+#include "hal/assert.h"
+#include "hal/gdma_hal.h"
+
+// Tear down the HAL context; only the device pointer is cleared here
+void gdma_hal_deinit(gdma_hal_context_t *hal)
+{
+    hal->generic_dev = NULL;
+}
+
+// Load the head descriptor address and start the channel
+void gdma_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr)
+{
+    hal->start_with_desc(hal, chan_id, dir, desc_base_addr);
+}
+
+// Stop the channel
+void gdma_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    hal->stop(hal, chan_id, dir);
+}
+
+// Make the engine re-scan the link list (pick up appended descriptors)
+void gdma_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    hal->append(hal, chan_id, dir);
+}
+
+// Reset the channel FIFO and state machine
+void gdma_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    hal->reset(hal, chan_id, dir);
+}
+
+// Set the channel's arbitration priority
+void gdma_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority)
+{
+    hal->set_priority(hal, chan_id, dir, priority);
+}
+
+// Connect the channel to a peripheral trigger
+void gdma_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id)
+{
+    hal->connect_peri(hal, chan_id, dir, periph, periph_sub_id);
+}
+
+// Disconnect the channel from its peripheral trigger
+void gdma_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    hal->disconnect_peri(hal, chan_id, dir);
+}
+
+// Configure data/descriptor burst mode
+void gdma_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst)
+{
+    hal->enable_burst(hal, chan_id, dir, en_data_burst, en_desc_burst);
+}
+
+// Set external-memory alignment; this callback is optional (only installed on
+// targets whose DMA can reach PSRAM), so it is guarded against NULL here
+void gdma_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align)
+{
+    if (hal->set_ext_mem_align) {
+        hal->set_ext_mem_align(hal, chan_id, dir, align);
+    }
+}
+
+// Set misc channel strategy (owner check, descriptor auto write-back)
+void gdma_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back)
+{
+    hal->set_strategy(hal, chan_id, dir, en_owner_check, en_desc_write_back);
+}
+
+// Enable/disable selected interrupt events
+void gdma_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis)
+{
+    hal->enable_intr(hal, chan_id, dir, intr_event_mask, en_or_dis);
+}
+
+// Clear selected pending interrupt status bits
+void gdma_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask)
+{
+    hal->clear_intr(hal, chan_id, dir, intr_event_mask);
+}
+
+// Read the channel's raw interrupt status bitmap
+uint32_t gdma_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    return hal->read_intr_status(hal, chan_id, dir);
+}
+
+// Get the interrupt status register address (as uint32_t)
+uint32_t gdma_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    return hal->get_intr_status_reg(hal, chan_id, dir);
+}
+
+// Get the address of the descriptor that raised the latest EOF
+uint32_t gdma_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
+{
+    return hal->get_eof_desc_addr(hal, chan_id, dir);
+}

+ 99 - 15
components/hal/include/hal/gdma_hal.h

@@ -1,34 +1,118 @@
 /*
- * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
 
-/*******************************************************************************
- * NOTICE
- * The HAL is not public api, don't use in application code.
- * See readme.md in soc/README.md
- ******************************************************************************/
-
 #pragma once
 
+#include <stdbool.h>
+#include <stdint.h>
+#include "soc/soc_caps.h"
+#include "hal/gdma_types.h"
+
+// TODO: don't expose the SOC header files, we can typedef a new type for the register dev pointer
+#if SOC_AHB_GDMA_VERSION == 1
+#include "soc/gdma_struct.h"
+#endif
+#if SOC_AHB_GDMA_VERSION == 2
+#include "soc/ahb_dma_struct.h"
+#endif
+#if SOC_AXI_GDMA_SUPPORTED
+#include "soc/axi_dma_struct.h"
+#endif
+
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#include "soc/soc_caps.h"
-
-#if SOC_GDMA_SUPPORTED
-
-#include "soc/gdma_struct.h"
+/// forward declaration of the HAL context
+typedef struct gdma_hal_context_t gdma_hal_context_t;
 
+/**
+ * @brief GDMA HAL configuration
+ */
 typedef struct {
-    gdma_dev_t *dev;
-} gdma_hal_context_t;
+    int group_id;  /*!< GDMA group ID */
+} gdma_hal_config_t;
 
-void gdma_hal_init(gdma_hal_context_t *hal, int group_id);
+/**
+ * @brief GDMA HAL private data
+ */
+typedef struct {
+    // The bitmap of the IDs that can be used by M2M are different between AXI DMA and AHB DMA, so we need to save a copy for each of them
+    uint32_t m2m_free_periph_mask;
+    // TODO: we can add more private data here, e.g. the interrupt event mask of interest
+    // for now, the AXI DMA and AHB DMA are sharing the same interrupt mask, so we don't need to store it here
+    // If one day they become incompatible, we shall save a copy for each of them as a private data
+} gdma_hal_priv_data_t;
 
+/**
+ * @brief HAL context definition
+ */
+struct gdma_hal_context_t {
+    /// the underlying hardware can be different
+    union {
+#if SOC_AHB_GDMA_VERSION == 1
+        gdma_dev_t *dev;
+#endif
+#if SOC_AHB_GDMA_VERSION == 2
+        ahb_dma_dev_t *ahb_dma_dev;
 #endif
+#if SOC_AXI_GDMA_SUPPORTED
+        axi_dma_dev_t *axi_dma_dev;
+#endif
+        void *generic_dev;
+    };
+    gdma_hal_priv_data_t *priv_data; ///< Private data for the HAL
+    void (*start_with_desc)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr); ///< Start the channel with the start address of the descriptor
+    void (*stop)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); ///< Stop the channel
+    void (*append)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); ///< Append a descriptor to the channel
+    void (*reset)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); ///< Reset the channel
+    void (*set_priority)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority); ///< Set the channel priority
+    void (*connect_peri)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id); ///< Connect the channel to a peripheral
+    void (*disconnect_peri)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); ///< Disconnect the channel from a peripheral
+    void (*enable_burst)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst); ///< Enable burst mode
+    void (*set_ext_mem_align)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align); ///< Set the alignment of the external memory
+    void (*set_strategy)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back); ///< Set some miscellaneous strategies of the channel behaviour
+    uint32_t (*get_intr_status_reg)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); ///< Get the interrupt status register address
+    void (*enable_intr)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis); ///< Enable the channel interrupt
+    void (*clear_intr)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask); ///< Clear the channel interrupt
+    uint32_t (*read_intr_status)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); ///< Read the channel interrupt status
+    uint32_t (*get_eof_desc_addr)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); ///< Get the address of the descriptor with the EOF flag set
+};
+
+void gdma_hal_deinit(gdma_hal_context_t *hal);
+
+void gdma_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr);
+
+void gdma_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority);
+
+void gdma_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id);
+
+void gdma_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst);
+
+void gdma_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align);
+
+void gdma_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back);
+
+void gdma_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis);
+
+void gdma_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask);
+
+uint32_t gdma_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+uint32_t gdma_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+uint32_t gdma_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
 
 #ifdef __cplusplus
 }

+ 49 - 0
components/hal/include/hal/gdma_hal_ahb.h

@@ -0,0 +1,49 @@
+/*
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#pragma once
+
+#include "hal/gdma_hal.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void gdma_ahb_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr);
+
+void gdma_ahb_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_ahb_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_ahb_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_ahb_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority);
+
+void gdma_ahb_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id);
+
+void gdma_ahb_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_ahb_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst);
+
+void gdma_ahb_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align);
+
+void gdma_ahb_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back);
+
+void gdma_ahb_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis);
+
+void gdma_ahb_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask);
+
+uint32_t gdma_ahb_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+uint32_t gdma_ahb_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+uint32_t gdma_ahb_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_ahb_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config);
+
+#ifdef __cplusplus
+}
+#endif

+ 49 - 0
components/hal/include/hal/gdma_hal_axi.h

@@ -0,0 +1,49 @@
+/*
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#pragma once
+
+#include "hal/gdma_hal.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void gdma_axi_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr);
+
+void gdma_axi_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_axi_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_axi_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_axi_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority);
+
+void gdma_axi_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id);
+
+void gdma_axi_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_axi_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst);
+
+void gdma_axi_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align);
+
+void gdma_axi_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back);
+
+void gdma_axi_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis);
+
+void gdma_axi_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask);
+
+uint32_t gdma_axi_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+uint32_t gdma_axi_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+uint32_t gdma_axi_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
+
+void gdma_axi_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config);
+
+#ifdef __cplusplus
+}
+#endif

+ 1 - 2
components/hal/include/hal/gdma_types.h

@@ -13,7 +13,6 @@ extern "C" {
 /**
  * @brief Enumeration of peripherals which have the DMA capability
  * @note Some peripheral might not be available on certain chip, please refer to `soc_caps.h` for detail.
- *
  */
 typedef enum {
     GDMA_TRIG_PERIPH_M2M,  /*!< GDMA trigger peripheral: M2M */
@@ -28,11 +27,11 @@ typedef enum {
     GDMA_TRIG_PERIPH_CAM,  /*!< GDMA trigger peripheral: CAM */
     GDMA_TRIG_PERIPH_RMT,  /*!< GDMA trigger peripheral: RMT */
     GDMA_TRIG_PERIPH_PARLIO, /*!< GDMA trigger peripheral: PARLIO */
+    GDMA_TRIG_PERIPH_I3C,  /*!< GDMA trigger peripheral: I3C */
 } gdma_trigger_peripheral_t;
 
 /**
  * @brief Enumeration of GDMA channel direction
- *
  */
 typedef enum {
     GDMA_CHANNEL_DIRECTION_TX, /*!< GDMA channel direction: TX */

+ 1 - 1
components/hal/spi_hal.c

@@ -13,7 +13,7 @@
 #include "soc/clk_tree_defs.h"
 
 //This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
-#if SOC_GDMA_SUPPORTED
+#if SOC_AHB_GDMA_VERSION == 1
 #include "soc/gdma_struct.h"
 #include "hal/gdma_ll.h"
 

+ 1 - 1
components/hal/spi_hal_iram.c

@@ -12,7 +12,7 @@
 #include "soc/soc_caps.h"
 
 //This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
-#if SOC_GDMA_SUPPORTED
+#if SOC_AHB_GDMA_VERSION == 1
 #include "soc/gdma_struct.h"
 #include "hal/gdma_ll.h"
 

+ 1 - 1
components/hal/spi_slave_hal.c

@@ -3,7 +3,7 @@
 #include "soc/soc_caps.h"
 
 //This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
-#if SOC_GDMA_SUPPORTED
+#if SOC_AHB_GDMA_VERSION == 1
 #include "soc/gdma_struct.h"
 #include "hal/gdma_ll.h"
 

+ 1 - 1
components/hal/spi_slave_hal_iram.c

@@ -3,7 +3,7 @@
 #include "soc/soc_caps.h"
 
 //This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
-#if SOC_GDMA_SUPPORTED
+#if SOC_AHB_GDMA_VERSION == 1
 #include "soc/gdma_struct.h"
 #include "hal/gdma_ll.h"
 

+ 1 - 1
components/hal/spi_slave_hd_hal.c

@@ -18,7 +18,7 @@
 #include "hal/assert.h"
 
 //This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
-#if SOC_GDMA_SUPPORTED
+#if SOC_AHB_GDMA_VERSION == 1
 #include "soc/gdma_struct.h"
 #include "hal/gdma_ll.h"
 #define spi_dma_ll_tx_restart(dev, chan)                           gdma_ll_tx_restart(&GDMA, chan)

+ 5 - 14
components/mbedtls/port/aes/dma/esp_aes_gdma_impl.c

@@ -1,19 +1,10 @@
-// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+/*
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
 
 #include "esp_aes_dma_priv.h"
-#include "hal/gdma_ll.h"
 #include "esp_crypto_shared_gdma.h"
 
 esp_err_t esp_aes_dma_start(const lldesc_t *input, const lldesc_t *output)

+ 1 - 0
components/mbedtls/port/crypto_shared_gdma/esp_crypto_shared_gdma.c

@@ -60,6 +60,7 @@ static esp_err_t crypto_shared_gdma_init(void)
 
     gdma_transfer_ability_t transfer_ability = {
         .sram_trans_align = 1,
+        .psram_trans_align = 16,
     };
 
 

+ 1 - 1
components/mbedtls/test_apps/main/test_mbedtls_sha.c

@@ -229,7 +229,7 @@ TEST_CASE("mbedtls SHA512 clone", "[mbedtls]")
     TEST_ASSERT_EQUAL_MEMORY_MESSAGE(sha512_thousand_bs, sha512, 64, "SHA512 cloned calculation");
 }
 
-TEST_CASE("mbedtls SHA384 clone", "[mbedtls][")
+TEST_CASE("mbedtls SHA384 clone", "[mbedtls]")
 {
     mbedtls_sha512_context ctx;
     mbedtls_sha512_context clone;

+ 1 - 1
components/mbedtls/test_apps/main/test_sha_perf.c

@@ -18,7 +18,7 @@
 #include "ccomp_timer.h"
 #include "test_mbedtls_utils.h"
 
-TEST_CASE("mbedtls SHA performance", "[aes]")
+TEST_CASE("mbedtls SHA performance", "[mbedtls]")
 {
     const unsigned CALLS = 256;
     const unsigned CALL_SZ = 16 * 1024;

+ 9 - 5
components/soc/esp32c2/include/soc/Kconfig.soc_caps.in

@@ -19,6 +19,10 @@ config SOC_GDMA_SUPPORTED
     bool
     default y
 
+config SOC_AHB_GDMA_SUPPORTED
+    bool
+    default y
+
 config SOC_GPTIMER_SUPPORTED
     bool
     default y
@@ -215,17 +219,17 @@ config SOC_ECC_SUPPORT_POINT_VERIFY_QUIRK
     bool
     default y
 
-config SOC_GDMA_GROUPS
+config SOC_AHB_GDMA_VERSION
     int
     default 1
 
-config SOC_GDMA_PAIRS_PER_GROUP
+config SOC_GDMA_NUM_GROUPS_MAX
     int
     default 1
 
-config SOC_GDMA_TX_RX_SHARE_INTERRUPT
-    bool
-    default y
+config SOC_GDMA_PAIRS_PER_GROUP_MAX
+    int
+    default 1
 
 config SOC_GPIO_PORT
     int

+ 11 - 1
components/soc/esp32c2/include/soc/gdma_channel.h

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -12,3 +12,13 @@
 #define SOC_GDMA_TRIG_PERIPH_UHCI0   (2)
 #define SOC_GDMA_TRIG_PERIPH_SHA0    (7)
 #define SOC_GDMA_TRIG_PERIPH_ADC0    (8)
+
+// On which system bus is the DMA instance of the peripheral connection mounted
+#define SOC_GDMA_BUS_ANY (-1)
+#define SOC_GDMA_BUS_AHB (0)
+
+#define SOC_GDMA_TRIG_PERIPH_M2M0_BUS     SOC_GDMA_BUS_ANY
+#define SOC_GDMA_TRIG_PERIPH_SPI2_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_UHCI0_BUS    SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_SHA0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_ADC0_BUS     SOC_GDMA_BUS_AHB

+ 5 - 5
components/soc/esp32c2/include/soc/soc_caps.h

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -29,6 +29,7 @@
 #define SOC_DEDICATED_GPIO_SUPPORTED    1
 #define SOC_UART_SUPPORTED              1
 #define SOC_GDMA_SUPPORTED              1
+#define SOC_AHB_GDMA_SUPPORTED          1
 #define SOC_GPTIMER_SUPPORTED           1
 #define SOC_BT_SUPPORTED                1
 #define SOC_WIFI_SUPPORTED              1
@@ -104,9 +105,9 @@
 #define SOC_ECC_SUPPORT_POINT_VERIFY_QUIRK  1  // C2 ECC peripheral has a bug in ECC point verification, if value of K is zero the verification fails
 
 /*-------------------------- GDMA CAPS -------------------------------------*/
-#define SOC_GDMA_GROUPS                 (1U) // Number of GDMA groups
-#define SOC_GDMA_PAIRS_PER_GROUP        (1U) // Number of GDMA pairs in each group
-#define SOC_GDMA_TX_RX_SHARE_INTERRUPT  (1)  // TX and RX channel in the same pair will share the same interrupt source number
+#define SOC_AHB_GDMA_VERSION            1U
+#define SOC_GDMA_NUM_GROUPS_MAX         1U
+#define SOC_GDMA_PAIRS_PER_GROUP_MAX    1U
 
 /*-------------------------- GPIO CAPS ---------------------------------------*/
 // ESP32-C2 has 1 GPIO peripheral
@@ -187,7 +188,6 @@
 /* The SHA engine is able to resume hashing from a user */
 #define SOC_SHA_SUPPORT_RESUME          (1)
 
-
 /* Supported HW algorithms */
 #define SOC_SHA_SUPPORT_SHA1            (1)
 #define SOC_SHA_SUPPORT_SHA224          (1)

+ 10 - 6
components/soc/esp32c3/include/soc/Kconfig.soc_caps.in

@@ -19,6 +19,10 @@ config SOC_GDMA_SUPPORTED
     bool
     default y
 
+config SOC_AHB_GDMA_SUPPORTED
+    bool
+    default y
+
 config SOC_GPTIMER_SUPPORTED
     bool
     default y
@@ -307,17 +311,17 @@ config SOC_DS_KEY_CHECK_MAX_WAIT_US
     int
     default 1100
 
-config SOC_GDMA_GROUPS
+config SOC_AHB_GDMA_VERSION
     int
     default 1
 
-config SOC_GDMA_PAIRS_PER_GROUP
+config SOC_GDMA_NUM_GROUPS_MAX
     int
-    default 3
+    default 1
 
-config SOC_GDMA_TX_RX_SHARE_INTERRUPT
-    bool
-    default y
+config SOC_GDMA_PAIRS_PER_GROUP_MAX
+    int
+    default 3
 
 config SOC_GPIO_PORT
     int

+ 13 - 1
components/soc/esp32c3/include/soc/gdma_channel.h

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -14,3 +14,15 @@
 #define SOC_GDMA_TRIG_PERIPH_AES0    (6)
 #define SOC_GDMA_TRIG_PERIPH_SHA0    (7)
 #define SOC_GDMA_TRIG_PERIPH_ADC0    (8)
+
+// On which system bus is the DMA instance of the peripheral connection mounted
+#define SOC_GDMA_BUS_ANY (-1)
+#define SOC_GDMA_BUS_AHB (0)
+
+#define SOC_GDMA_TRIG_PERIPH_M2M0_BUS     SOC_GDMA_BUS_ANY
+#define SOC_GDMA_TRIG_PERIPH_SPI2_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_UHCI0_BUS    SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_I2S0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_AES0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_SHA0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_ADC0_BUS     SOC_GDMA_BUS_AHB

+ 6 - 5
components/soc/esp32c3/include/soc/soc_caps.h

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -29,6 +29,7 @@
 #define SOC_DEDICATED_GPIO_SUPPORTED    1
 #define SOC_UART_SUPPORTED              1
 #define SOC_GDMA_SUPPORTED              1
+#define SOC_AHB_GDMA_SUPPORTED          1
 #define SOC_GPTIMER_SUPPORTED           1
 #define SOC_TWAI_SUPPORTED              1
 #define SOC_BT_SUPPORTED                1
@@ -142,9 +143,9 @@
 #define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100)
 
 /*-------------------------- GDMA CAPS -------------------------------------*/
-#define SOC_GDMA_GROUPS                 (1U) // Number of GDMA groups
-#define SOC_GDMA_PAIRS_PER_GROUP        (3)  // Number of GDMA pairs in each group
-#define SOC_GDMA_TX_RX_SHARE_INTERRUPT  (1)  // TX and RX channel in the same pair will share the same interrupt source number
+#define SOC_AHB_GDMA_VERSION            1U
+#define SOC_GDMA_NUM_GROUPS_MAX         1U
+#define SOC_GDMA_PAIRS_PER_GROUP_MAX    3
 
 /*-------------------------- GPIO CAPS ---------------------------------------*/
 // ESP32-C3 has 1 GPIO peripheral
@@ -426,6 +427,6 @@
 /*---------------------------------- Bluetooth CAPS ----------------------------------*/
 #define SOC_BLE_SUPPORTED               (1)    /*!< Support Bluetooth Low Energy hardware */
 #define SOC_BLE_MESH_SUPPORTED          (1)    /*!< Support BLE MESH */
-#define SOC_BLE_50_SUPPORTED		(1)    /*!< Support Bluetooth 5.0 */
+#define SOC_BLE_50_SUPPORTED            (1)    /*!< Support Bluetooth 5.0 */
 #define SOC_BLE_DEVICE_PRIVACY_SUPPORTED (1)   /*!< Support BLE device privacy mode */
 #define SOC_BLUFI_SUPPORTED             (1)    /*!< Support BLUFI */

+ 10 - 2
components/soc/esp32c6/include/soc/Kconfig.soc_caps.in

@@ -19,6 +19,10 @@ config SOC_GDMA_SUPPORTED
     bool
     default y
 
+config SOC_AHB_GDMA_SUPPORTED
+    bool
+    default y
+
 config SOC_GPTIMER_SUPPORTED
     bool
     default y
@@ -375,11 +379,15 @@ config SOC_DS_KEY_CHECK_MAX_WAIT_US
     int
     default 1100
 
-config SOC_GDMA_GROUPS
+config SOC_AHB_GDMA_VERSION
+    int
+    default 1
+
+config SOC_GDMA_NUM_GROUPS_MAX
     int
     default 1
 
-config SOC_GDMA_PAIRS_PER_GROUP
+config SOC_GDMA_PAIRS_PER_GROUP_MAX
     int
     default 3
 

+ 13 - 0
components/soc/esp32c6/include/soc/gdma_channel.h

@@ -15,3 +15,16 @@
 #define SOC_GDMA_TRIG_PERIPH_SHA0    (7)
 #define SOC_GDMA_TRIG_PERIPH_ADC0    (8)
 #define SOC_GDMA_TRIG_PERIPH_PARLIO0 (9)
+
+// On which system bus is the DMA instance of the peripheral connection mounted
+#define SOC_GDMA_BUS_ANY (-1)
+#define SOC_GDMA_BUS_AHB (0)
+
+#define SOC_GDMA_TRIG_PERIPH_M2M0_BUS     SOC_GDMA_BUS_ANY
+#define SOC_GDMA_TRIG_PERIPH_SPI2_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_UHCI0_BUS    SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_I2S0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_AES0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_SHA0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_ADC0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_PARLIO0_BUS  SOC_GDMA_BUS_AHB

+ 5 - 3
components/soc/esp32c6/include/soc/soc_caps.h

@@ -29,6 +29,7 @@
 #define SOC_DEDICATED_GPIO_SUPPORTED    1
 #define SOC_UART_SUPPORTED              1
 #define SOC_GDMA_SUPPORTED              1
+#define SOC_AHB_GDMA_SUPPORTED          1
 #define SOC_GPTIMER_SUPPORTED           1
 #define SOC_PCNT_SUPPORTED              1
 #define SOC_MCPWM_SUPPORTED             1
@@ -161,9 +162,10 @@
 #define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100)
 
 /*-------------------------- GDMA CAPS -------------------------------------*/
-#define SOC_GDMA_GROUPS                 (1U) // Number of GDMA groups
-#define SOC_GDMA_PAIRS_PER_GROUP        (3)  // Number of GDMA pairs in each group
-#define SOC_GDMA_SUPPORT_ETM            (1)  // Support ETM submodule
+#define SOC_AHB_GDMA_VERSION            1U
+#define SOC_GDMA_NUM_GROUPS_MAX         1U
+#define SOC_GDMA_PAIRS_PER_GROUP_MAX    3
+#define SOC_GDMA_SUPPORT_ETM            1  // Support ETM submodule
 
 /*-------------------------- ETM CAPS --------------------------------------*/
 #define SOC_ETM_GROUPS                  1U  // Number of ETM groups

+ 10 - 2
components/soc/esp32h2/include/soc/Kconfig.soc_caps.in

@@ -23,6 +23,10 @@ config SOC_GDMA_SUPPORTED
     bool
     default y
 
+config SOC_AHB_GDMA_SUPPORTED
+    bool
+    default y
+
 config SOC_ASYNC_MEMCPY_SUPPORTED
     bool
     default y
@@ -367,11 +371,15 @@ config SOC_DS_KEY_CHECK_MAX_WAIT_US
     int
     default 1100
 
-config SOC_GDMA_GROUPS
+config SOC_AHB_GDMA_VERSION
+    int
+    default 1
+
+config SOC_GDMA_NUM_GROUPS_MAX
     int
     default 1
 
-config SOC_GDMA_PAIRS_PER_GROUP
+config SOC_GDMA_PAIRS_PER_GROUP_MAX
     int
     default 3
 

+ 13 - 0
components/soc/esp32h2/include/soc/gdma_channel.h

@@ -15,3 +15,16 @@
 #define SOC_GDMA_TRIG_PERIPH_SHA0    (7)
 #define SOC_GDMA_TRIG_PERIPH_ADC0    (8)
 #define SOC_GDMA_TRIG_PERIPH_PARLIO0 (9)
+
+// On which system bus is the DMA instance of the peripheral connection mounted
+#define SOC_GDMA_BUS_ANY (-1)
+#define SOC_GDMA_BUS_AHB (0)
+
+#define SOC_GDMA_TRIG_PERIPH_M2M0_BUS     SOC_GDMA_BUS_ANY
+#define SOC_GDMA_TRIG_PERIPH_SPI2_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_UHCI0_BUS    SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_I2S0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_AES0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_SHA0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_ADC0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_PARLIO0_BUS  SOC_GDMA_BUS_AHB

+ 5 - 3
components/soc/esp32h2/include/soc/soc_caps.h

@@ -30,6 +30,7 @@
 #define SOC_DEDICATED_GPIO_SUPPORTED    1
 #define SOC_UART_SUPPORTED              1
 #define SOC_GDMA_SUPPORTED              1
+#define SOC_AHB_GDMA_SUPPORTED          1
 #define SOC_ASYNC_MEMCPY_SUPPORTED      1
 #define SOC_PCNT_SUPPORTED              1
 #define SOC_MCPWM_SUPPORTED             1
@@ -161,9 +162,10 @@
 #define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100)
 
 /*-------------------------- GDMA CAPS -------------------------------------*/
-#define SOC_GDMA_GROUPS                 (1U) // Number of GDMA groups
-#define SOC_GDMA_PAIRS_PER_GROUP        (3)  // Number of GDMA pairs in each group
-#define SOC_GDMA_SUPPORT_ETM            (1)  // Support ETM submodule
+#define SOC_AHB_GDMA_VERSION            1U
+#define SOC_GDMA_NUM_GROUPS_MAX         1U
+#define SOC_GDMA_PAIRS_PER_GROUP_MAX    3
+#define SOC_GDMA_SUPPORT_ETM            1  // Support ETM submodule
 
 /*-------------------------- ETM CAPS --------------------------------------*/
 #define SOC_ETM_GROUPS                  1U  // Number of ETM groups

+ 8 - 4
components/soc/esp32p4/include/soc/Kconfig.soc_caps.in

@@ -195,17 +195,21 @@ config SOC_DS_KEY_CHECK_MAX_WAIT_US
     int
     default 1100
 
-config SOC_GDMA_GROUPS
+config SOC_AHB_GDMA_VERSION
     int
-    default 1
+    default 2
 
-config SOC_GDMA_PAIRS_PER_GROUP
+config SOC_GDMA_NUM_GROUPS_MAX
+    int
+    default 2
+
+config SOC_GDMA_PAIRS_PER_GROUP_MAX
     int
     default 3
 
 config SOC_GDMA_SUPPORT_ETM
     bool
-    default n
+    default y
 
 config SOC_ETM_GROUPS
     int

Fișier diff suprimat deoarece este prea mare
+ 171 - 175
components/soc/esp32p4/include/soc/ahb_dma_struct.h


Fișier diff suprimat deoarece este prea mare
+ 190 - 191
components/soc/esp32p4/include/soc/axi_dma_struct.h


+ 38 - 0
components/soc/esp32p4/include/soc/gdma_channel.h

@@ -5,3 +5,41 @@
  */
 
 #pragma once
+
+// The following macros have a format SOC_[periph][instance_id] to make it work with `GDMA_MAKE_TRIGGER`
+#define SOC_GDMA_TRIG_PERIPH_M2M0     -1
+#define SOC_GDMA_TRIG_PERIPH_I3C0     0
+#define SOC_GDMA_TRIG_PERIPH_UHCI0    2
+#define SOC_GDMA_TRIG_PERIPH_I2S0     3
+#define SOC_GDMA_TRIG_PERIPH_I2S1     4
+#define SOC_GDMA_TRIG_PERIPH_I2S2     5
+#define SOC_GDMA_TRIG_PERIPH_ADC0     8
+#define SOC_GDMA_TRIG_PERIPH_RMT0     10
+#define SOC_GDMA_TRIG_PERIPH_LCD0     0
+#define SOC_GDMA_TRIG_PERIPH_CAM0     0
+#define SOC_GDMA_TRIG_PERIPH_SPI2     1
+#define SOC_GDMA_TRIG_PERIPH_SPI3     2
+#define SOC_GDMA_TRIG_PERIPH_PARLIO0  3
+#define SOC_GDMA_TRIG_PERIPH_AES0     4
+#define SOC_GDMA_TRIG_PERIPH_SHA0     5
+
+// On which system bus is the DMA instance of the peripheral connection mounted
+#define SOC_GDMA_BUS_ANY -1
+#define SOC_GDMA_BUS_AHB 0
+#define SOC_GDMA_BUS_AXI 1
+
+#define SOC_GDMA_TRIG_PERIPH_M2M0_BUS     SOC_GDMA_BUS_ANY
+#define SOC_GDMA_TRIG_PERIPH_UHCI0_BUS    SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_I2S0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_I2S1_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_I2S2_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_ADC0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_RMT0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_I3C0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_SPI2_BUS     SOC_GDMA_BUS_AXI
+#define SOC_GDMA_TRIG_PERIPH_SPI3_BUS     SOC_GDMA_BUS_AXI
+#define SOC_GDMA_TRIG_PERIPH_LCD0_BUS     SOC_GDMA_BUS_AXI
+#define SOC_GDMA_TRIG_PERIPH_CAM0_BUS     SOC_GDMA_BUS_AXI
+#define SOC_GDMA_TRIG_PERIPH_AES0_BUS     SOC_GDMA_BUS_AXI
+#define SOC_GDMA_TRIG_PERIPH_SHA0_BUS     SOC_GDMA_BUS_AXI
+#define SOC_GDMA_TRIG_PERIPH_PARLIO0_BUS  SOC_GDMA_BUS_AXI

+ 6 - 4
components/soc/esp32p4/include/soc/soc_caps.h

@@ -30,6 +30,8 @@
 // #define SOC_DEDICATED_GPIO_SUPPORTED    1  //TODO: IDF-7552
 #define SOC_UART_SUPPORTED              1
 // #define SOC_GDMA_SUPPORTED              1  //TODO: IDF-6504
+// #define SOC_AHB_GDMA_SUPPORTED          1
+// #define SOC_AXI_GDMA_SUPPORTED          1
 // #define SOC_GPTIMER_SUPPORTED           1  //TODO: IDF-6515
 // #define SOC_PCNT_SUPPORTED              1  //TODO: IDF-7475
 // #define SOC_MCPWM_SUPPORTED             1  //TODO: IDF-7493
@@ -155,9 +157,10 @@
 #define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100)
 
 /*-------------------------- GDMA CAPS -------------------------------------*/
-#define SOC_GDMA_GROUPS                 (1U) // Number of GDMA groups
-#define SOC_GDMA_PAIRS_PER_GROUP        (3)  // Number of GDMA pairs in each group
-#define SOC_GDMA_SUPPORT_ETM            (0)  // Support ETM submodule
+#define SOC_AHB_GDMA_VERSION            2
+#define SOC_GDMA_NUM_GROUPS_MAX         2
+#define SOC_GDMA_PAIRS_PER_GROUP_MAX    3
+#define SOC_GDMA_SUPPORT_ETM            1  // Both AHB-DMA and AXI-DMA supports ETM
 
 /*-------------------------- ETM CAPS --------------------------------------*/
 #define SOC_ETM_GROUPS                  1U  // Number of ETM groups
@@ -422,7 +425,6 @@
 
 /*-------------------------- MEMPROT CAPS ------------------------------------*/
 
-
 /*-------------------------- UART CAPS ---------------------------------------*/
 // ESP32-P4 has 2 UARTs
 #define SOC_UART_NUM                    (2)

+ 13 - 5
components/soc/esp32s3/include/soc/Kconfig.soc_caps.in

@@ -47,6 +47,10 @@ config SOC_GDMA_SUPPORTED
     bool
     default y
 
+config SOC_AHB_GDMA_SUPPORTED
+    bool
+    default y
+
 config SOC_GPTIMER_SUPPORTED
     bool
     default y
@@ -363,15 +367,19 @@ config SOC_DS_KEY_CHECK_MAX_WAIT_US
     int
     default 1100
 
-config SOC_GDMA_GROUPS
-    bool
-    default y
+config SOC_AHB_GDMA_VERSION
+    int
+    default 1
+
+config SOC_GDMA_NUM_GROUPS_MAX
+    int
+    default 1
 
-config SOC_GDMA_PAIRS_PER_GROUP
+config SOC_GDMA_PAIRS_PER_GROUP_MAX
     int
     default 5
 
-config SOC_GDMA_SUPPORT_PSRAM
+config SOC_AHB_GDMA_SUPPORT_PSRAM
     bool
     default y
 

+ 18 - 1
components/soc/esp32s3/include/soc/gdma_channel.h

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -19,3 +19,20 @@
 #define SOC_GDMA_TRIG_PERIPH_SHA0    (7)
 #define SOC_GDMA_TRIG_PERIPH_ADC0    (8)
 #define SOC_GDMA_TRIG_PERIPH_RMT0    (9)
+
+// On which system bus is the DMA instance of the peripheral connection mounted
+#define SOC_GDMA_BUS_ANY (-1)
+#define SOC_GDMA_BUS_AHB (0)
+
+#define SOC_GDMA_TRIG_PERIPH_M2M0_BUS     SOC_GDMA_BUS_ANY
+#define SOC_GDMA_TRIG_PERIPH_SPI2_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_SPI3_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_UHCI0_BUS    SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_I2S0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_I2S1_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_LCD0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_CAM0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_AES0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_SHA0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_ADC0_BUS     SOC_GDMA_BUS_AHB
+#define SOC_GDMA_TRIG_PERIPH_RMT0_BUS     SOC_GDMA_BUS_AHB

+ 7 - 10
components/soc/esp32s3/include/soc/soc_caps.h

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2019-2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2019-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -31,6 +31,7 @@
 #define SOC_WIFI_SUPPORTED              1
 #define SOC_TWAI_SUPPORTED              1
 #define SOC_GDMA_SUPPORTED              1
+#define SOC_AHB_GDMA_SUPPORTED          1
 #define SOC_GPTIMER_SUPPORTED           1
 #define SOC_LCDCAM_SUPPORTED            1
 #define SOC_MCPWM_SUPPORTED             1
@@ -145,9 +146,10 @@
 #define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100)
 
 /*-------------------------- GDMA CAPS ---------------------------------------*/
-#define SOC_GDMA_GROUPS            (1)  // Number of GDMA groups
-#define SOC_GDMA_PAIRS_PER_GROUP   (5)  // Number of GDMA pairs in each group
-#define SOC_GDMA_SUPPORT_PSRAM     (1)  // GDMA can access external PSRAM
+#define SOC_AHB_GDMA_VERSION           1U
+#define SOC_GDMA_NUM_GROUPS_MAX        1U
+#define SOC_GDMA_PAIRS_PER_GROUP_MAX   5
+#define SOC_AHB_GDMA_SUPPORT_PSRAM     1
 
 /*-------------------------- GPIO CAPS ---------------------------------------*/
 // ESP32-S3 has 1 GPIO peripheral
@@ -168,7 +170,6 @@
 // digital I/O pad powered by VDD3P3_CPU or VDD_SPI(GPIO_NUM_26~GPIO_NUM_48)
 #define SOC_GPIO_VALID_DIGITAL_IO_PAD_MASK 0x0001FFFFFC000000ULL
 
-
 /*-------------------------- Dedicated GPIO CAPS -----------------------------*/
 #define SOC_DEDIC_GPIO_OUT_CHANNELS_NUM (8) /*!< 8 outward channels on each CPU core */
 #define SOC_DEDIC_GPIO_IN_CHANNELS_NUM  (8) /*!< 8 inward channels on each CPU core */
@@ -335,7 +336,7 @@
 #define SOC_TIMER_GROUP_TOTAL_TIMERS      (4)
 
 /*-------------------------- TOUCH SENSOR CAPS -------------------------------*/
-#define SOC_TOUCH_VERSION_2               	(1)  // Hardware version of touch sensor
+#define SOC_TOUCH_VERSION_2                 (1)  // Hardware version of touch sensor
 #define SOC_TOUCH_SENSOR_NUM                (15) /*! 15 Touch channels */
 #define SOC_TOUCH_PROXIMITY_CHANNEL_NUM     (3)  /* Sopport touch proximity channel number. */
 #define SOC_TOUCH_PROXIMITY_MEAS_DONE_SUPPORTED (1) /*Sopport touch proximity channel measure done interrupt type. */
@@ -367,7 +368,6 @@
 /*-------------------------- USB CAPS ----------------------------------------*/
 #define SOC_USB_PERIPH_NUM 1
 
-
 /*--------------------------- SHA CAPS ---------------------------------------*/
 /* Max amount of bytes in a single DMA operation is 4095,
    for SHA this means that the biggest safe amount of bytes is
@@ -392,7 +392,6 @@
 #define SOC_SHA_SUPPORT_SHA512_256      (1)
 #define SOC_SHA_SUPPORT_SHA512_T        (1)
 
-
 /*--------------------------- MPI CAPS ---------------------------------------*/
 #define SOC_MPI_MEM_BLOCKS_NUM (4)
 #define SOC_MPI_OPERATIONS_NUM (3)
@@ -400,7 +399,6 @@
 /*--------------------------- RSA CAPS ---------------------------------------*/
 #define SOC_RSA_MAX_BIT_LEN    (4096)
 
-
 /*-------------------------- AES CAPS -----------------------------------------*/
 #define SOC_AES_SUPPORT_DMA     (1)
 
@@ -410,7 +408,6 @@
 #define SOC_AES_SUPPORT_AES_128 (1)
 #define SOC_AES_SUPPORT_AES_256 (1)
 
-
 /*-------------------------- Power Management CAPS ---------------------------*/
 #define SOC_PM_SUPPORT_EXT0_WAKEUP      (1)
 #define SOC_PM_SUPPORT_EXT1_WAKEUP      (1)

+ 3 - 3
components/soc/include/soc/gdma_periph.h

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -20,8 +20,8 @@ typedef struct {
         struct {
             const int rx_irq_id;
             const int tx_irq_id;
-        } pairs[SOC_GDMA_PAIRS_PER_GROUP];
-    } groups[SOC_GDMA_GROUPS];
+        } pairs[SOC_GDMA_PAIRS_PER_GROUP_MAX];
+    } groups[SOC_GDMA_NUM_GROUPS_MAX];
 } gdma_signal_conn_t;
 
 extern const gdma_signal_conn_t gdma_periph_signals;

+ 0 - 1
tools/ci/check_copyright_ignore.txt

@@ -635,7 +635,6 @@ components/mbedtls/esp_crt_bundle/test_gen_crt_bundle/test_gen_crt_bundle.py
 components/mbedtls/port/aes/block/esp_aes.c
 components/mbedtls/port/aes/dma/esp_aes.c
 components/mbedtls/port/aes/dma/esp_aes_crypto_dma_impl.c
-components/mbedtls/port/aes/dma/esp_aes_gdma_impl.c
 components/mbedtls/port/aes/dma/include/esp_aes_dma_priv.h
 components/mbedtls/port/aes/esp_aes_xts.c
 components/mbedtls/port/include/aes/esp_aes.h

Unele fișiere nu au fost afișate deoarece prea multe fișiere au fost modificate în acest diff