gdma.c

/*
 * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
/**
 *                      AHB-Bus --------+        +-------- AXI-Bus
 *                                      |        |
 *                                      |        |
 * +-----------------------------------+--+    +--+-----------------------------------+
 * |            GDMA-Group-X           |  |    |  |            GDMA-Group-Y           |
 * | +-------------+    +------------+ |  |    |  | +-------------+    +------------+ |
 * | | GDMA-Pair-0 |... |GDMA-Pair-N | |  |    |  | | GDMA-Pair-0 |... |GDMA-Pair-N | |
 * | |             |    |            | |  |    |  | |             |    |            | |
 * | |   TX-Chan   |... |  TX-Chan   | |  |    |  | |   TX-Chan   |... |  TX-Chan   | |
 * | |   RX-Chan   |    |  RX-Chan   | |  |    |  | |   RX-Chan   |    |  RX-Chan   | |
 * | +-------------+    +------------+ |  |    |  | +-------------+    +------------+ |
 * |                                   |  |    |  |                                   |
 * +-----------------------------------+--+    +--+-----------------------------------+
 *                                      |        |
 *                                      |        |
 *
 * - A channel is allocated when the user calls `gdma_new_ahb_channel`/`gdma_new_axi_channel`; its lifecycle is maintained by the user.
 * - Pairs and groups are lazily allocated; their lifecycles are maintained by this driver.
 * - Instead of one global spinlock, separate spinlocks are created at different levels (group, pair).
 */
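/*
 * Typical usage, as an illustrative sketch only (not part of this driver): allocate a TX
 * channel on the AHB bus, connect it to a trigger, register an EOF callback and start a
 * prepared descriptor chain. `GDMA_MAKE_TRIGGER`, `my_tx_eof_cb` and `link_base` are
 * assumed names; check the public gdma header for the exact trigger macro and callback
 * signature.
 *
 *     gdma_channel_handle_t tx_chan = NULL;
 *     gdma_channel_alloc_config_t alloc_cfg = {
 *         .direction = GDMA_CHANNEL_DIRECTION_TX,
 *     };
 *     ESP_ERROR_CHECK(gdma_new_ahb_channel(&alloc_cfg, &tx_chan));
 *     ESP_ERROR_CHECK(gdma_connect(tx_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0)));
 *     gdma_tx_event_callbacks_t cbs = {
 *         .on_trans_eof = my_tx_eof_cb, // hypothetical user callback
 *     };
 *     ESP_ERROR_CHECK(gdma_register_tx_event_callbacks(tx_chan, &cbs, NULL));
 *     ESP_ERROR_CHECK(gdma_start(tx_chan, (intptr_t)link_base)); // link_base: first DMA descriptor
 */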
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include "sdkconfig.h"
#if CONFIG_GDMA_ENABLE_DEBUG_LOG
// The local log level must be defined before including esp_log.h
// Set the maximum log level for this source file
#define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
#endif
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "soc/soc_caps.h"
#include "soc/periph_defs.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_memory_utils.h"
#include "esp_private/periph_ctrl.h"
#include "gdma_priv.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
static const char *TAG = "gdma";
#if !SOC_RCC_IS_INDEPENDENT
// The Reset and Clock Control registers are shared with other peripherals, so a critical section is needed
#define GDMA_RCC_ATOMIC() PERIPH_RCC_ATOMIC()
#else
#define GDMA_RCC_ATOMIC()
#endif
#define GDMA_INVALID_PERIPH_TRIG  (0x3F)
#define SEARCH_REQUEST_RX_CHANNEL (1 << 0)
#define SEARCH_REQUEST_TX_CHANNEL (1 << 1)
typedef struct gdma_platform_t {
    portMUX_TYPE spinlock;                         // platform level spinlock, protect the group handle slots and reference count of each group
    gdma_group_t *groups[SOC_GDMA_NUM_GROUPS_MAX]; // array of GDMA group instances
    int group_ref_counts[SOC_GDMA_NUM_GROUPS_MAX]; // reference count used to protect group install/uninstall
} gdma_platform_t;
static gdma_group_t *gdma_acquire_group_handle(int group_id, void (*hal_init)(gdma_hal_context_t *hal, const gdma_hal_config_t *config));
static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id);
static void gdma_release_group_handle(gdma_group_t *group);
static void gdma_release_pair_handle(gdma_pair_t *pair);
static esp_err_t gdma_del_tx_channel(gdma_channel_t *dma_channel);
static esp_err_t gdma_del_rx_channel(gdma_channel_t *dma_channel);
static esp_err_t gdma_install_rx_interrupt(gdma_rx_channel_t *rx_chan);
static esp_err_t gdma_install_tx_interrupt(gdma_tx_channel_t *tx_chan);
// gdma driver platform
static gdma_platform_t s_platform = {
    .spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED,
};
typedef struct {
    int bus_id;
    int start_group_id;
    int end_group_id;
    int pairs_per_group;
    void (*hal_init)(gdma_hal_context_t *hal, const gdma_hal_config_t *config);
} gdma_channel_search_info_t;
static esp_err_t do_allocate_gdma_channel(const gdma_channel_search_info_t *search_info, const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
{
#if CONFIG_GDMA_ENABLE_DEBUG_LOG
    esp_log_level_set(TAG, ESP_LOG_DEBUG);
#endif
    esp_err_t ret = ESP_OK;
    gdma_tx_channel_t *alloc_tx_channel = NULL;
    gdma_rx_channel_t *alloc_rx_channel = NULL;
    int search_code = 0;
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    ESP_RETURN_ON_FALSE(config && ret_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    if (config->flags.reserve_sibling) {
        search_code = SEARCH_REQUEST_RX_CHANNEL | SEARCH_REQUEST_TX_CHANNEL; // search for a pair of channels
    }
    if (config->direction == GDMA_CHANNEL_DIRECTION_TX) {
        search_code |= SEARCH_REQUEST_TX_CHANNEL; // search TX only
        alloc_tx_channel = heap_caps_calloc(1, sizeof(gdma_tx_channel_t), GDMA_MEM_ALLOC_CAPS);
        ESP_GOTO_ON_FALSE(alloc_tx_channel, ESP_ERR_NO_MEM, err, TAG, "no mem for gdma tx channel");
    } else if (config->direction == GDMA_CHANNEL_DIRECTION_RX) {
        search_code |= SEARCH_REQUEST_RX_CHANNEL; // search RX only
        alloc_rx_channel = heap_caps_calloc(1, sizeof(gdma_rx_channel_t), GDMA_MEM_ALLOC_CAPS);
        ESP_GOTO_ON_FALSE(alloc_rx_channel, ESP_ERR_NO_MEM, err, TAG, "no mem for gdma rx channel");
    }
    if (config->sibling_chan) {
        pair = config->sibling_chan->pair;
        ESP_GOTO_ON_FALSE(pair, ESP_ERR_INVALID_ARG, err, TAG, "invalid sibling channel");
        ESP_GOTO_ON_FALSE(config->sibling_chan->direction != config->direction, ESP_ERR_INVALID_ARG, err, TAG, "sibling channel should have a different direction");
        group = pair->group;
        portENTER_CRITICAL(&group->spinlock);
        group->pair_ref_counts[pair->pair_id]++; // channel obtains a reference to pair
        portEXIT_CRITICAL(&group->spinlock);
        goto search_done; // skip the search path below if the user has specified a sibling channel
    }
    int start_group_id = search_info->start_group_id;
    int end_group_id = search_info->end_group_id;
    int pairs_per_group = search_info->pairs_per_group;
    for (int i = start_group_id; i < end_group_id && search_code; i++) { // loop to search group
        group = gdma_acquire_group_handle(i, search_info->hal_init);
        ESP_GOTO_ON_FALSE(group, ESP_ERR_NO_MEM, err, TAG, "no mem for group(%d)", i); // check before dereferencing the group handle
        group->bus_id = search_info->bus_id;
        for (int j = 0; j < pairs_per_group && search_code; j++) { // loop to search pair
            pair = gdma_acquire_pair_handle(group, j);
            ESP_GOTO_ON_FALSE(pair, ESP_ERR_NO_MEM, err, TAG, "no mem for pair(%d,%d)", i, j);
            portENTER_CRITICAL(&pair->spinlock);
            if (!(search_code & pair->occupy_code)) { // pair has suitable position for acquired channel(s)
                pair->occupy_code |= search_code;
                search_code = 0; // exit search loop
            }
            portEXIT_CRITICAL(&pair->spinlock);
            // found a pair that satisfies the search condition
            if (search_code == 0) {
                portENTER_CRITICAL(&group->spinlock);
                group->pair_ref_counts[pair->pair_id]++; // channel obtains a reference to pair
                portEXIT_CRITICAL(&group->spinlock);
            }
            gdma_release_pair_handle(pair);
        } // loop used to search pair
        gdma_release_group_handle(group);
        // restore to initial state if no suitable channel slot is found
        if (search_code) {
            group = NULL;
            pair = NULL;
        }
    } // loop used to search group
    ESP_GOTO_ON_FALSE(search_code == 0, ESP_ERR_NOT_FOUND, err, TAG, "no free gdma channel, search code=%d", search_code);
    assert(pair && group); // pair and group handle shouldn't be NULL
search_done:
    // register TX channel
    if (alloc_tx_channel) {
        pair->tx_chan = alloc_tx_channel;
        alloc_tx_channel->base.pair = pair;
        alloc_tx_channel->base.direction = GDMA_CHANNEL_DIRECTION_TX;
        alloc_tx_channel->base.periph_id = GDMA_INVALID_PERIPH_TRIG;
        alloc_tx_channel->base.del = gdma_del_tx_channel; // set channel deletion function
        *ret_chan = &alloc_tx_channel->base; // return the installed channel
    }
    // register RX channel
    if (alloc_rx_channel) {
        pair->rx_chan = alloc_rx_channel;
        alloc_rx_channel->base.pair = pair;
        alloc_rx_channel->base.direction = GDMA_CHANNEL_DIRECTION_RX;
        alloc_rx_channel->base.periph_id = GDMA_INVALID_PERIPH_TRIG;
        alloc_rx_channel->base.del = gdma_del_rx_channel; // set channel deletion function
        *ret_chan = &alloc_rx_channel->base; // return the installed channel
    }
    (*ret_chan)->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
    ESP_LOGD(TAG, "new %s channel (%d,%d) at %p", (config->direction == GDMA_CHANNEL_DIRECTION_TX) ? "tx" : "rx",
             group->group_id, pair->pair_id, *ret_chan);
    return ESP_OK;
err:
    if (alloc_tx_channel) {
        free(alloc_tx_channel);
    }
    if (alloc_rx_channel) {
        free(alloc_rx_channel);
    }
    if (pair) {
        gdma_release_pair_handle(pair);
    }
    if (group) {
        gdma_release_group_handle(group);
    }
    return ret;
}
#if SOC_AHB_GDMA_SUPPORTED
esp_err_t gdma_new_ahb_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
{
    gdma_channel_search_info_t search_info = {
        .bus_id = SOC_GDMA_BUS_AHB,
        .start_group_id = GDMA_LL_AHB_GROUP_START_ID,
        .end_group_id = GDMA_LL_AHB_GROUP_START_ID + GDMA_LL_AHB_NUM_GROUPS,
        .pairs_per_group = GDMA_LL_AHB_PAIRS_PER_GROUP,
        .hal_init = gdma_ahb_hal_init,
    };
    return do_allocate_gdma_channel(&search_info, config, ret_chan);
}
esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
__attribute__((alias("gdma_new_ahb_channel")));
#endif // SOC_AHB_GDMA_SUPPORTED
#if SOC_AXI_GDMA_SUPPORTED
esp_err_t gdma_new_axi_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
{
    gdma_channel_search_info_t search_info = {
        .bus_id = SOC_GDMA_BUS_AXI,
        .start_group_id = GDMA_LL_AXI_GROUP_START_ID,
        .end_group_id = GDMA_LL_AXI_GROUP_START_ID + GDMA_LL_AXI_NUM_GROUPS,
        .pairs_per_group = GDMA_LL_AXI_PAIRS_PER_GROUP,
        .hal_init = gdma_axi_hal_init,
    };
    return do_allocate_gdma_channel(&search_info, config, ret_chan);
}
#endif // SOC_AXI_GDMA_SUPPORTED
esp_err_t gdma_del_channel(gdma_channel_handle_t dma_chan)
{
    ESP_RETURN_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    // reset the channel priority to default
    gdma_hal_set_priority(hal, pair->pair_id, dma_chan->direction, 0);
    // call `gdma_del_tx_channel` or `gdma_del_rx_channel` under the hood
    return dma_chan->del(dma_chan);
}
esp_err_t gdma_get_channel_id(gdma_channel_handle_t dma_chan, int *channel_id)
{
    esp_err_t ret = ESP_OK;
    gdma_pair_t *pair = NULL;
    ESP_GOTO_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    pair = dma_chan->pair;
    *channel_id = pair->pair_id;
err:
    return ret;
}
esp_err_t gdma_connect(gdma_channel_handle_t dma_chan, gdma_trigger_t trig_periph)
{
    ESP_RETURN_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE(dma_chan->periph_id == GDMA_INVALID_PERIPH_TRIG, ESP_ERR_INVALID_STATE, TAG, "channel is already in use by peripheral: %d", dma_chan->periph_id);
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    bool periph_conflict = false;
    if (trig_periph.bus_id != SOC_GDMA_BUS_ANY) {
        ESP_RETURN_ON_FALSE(trig_periph.bus_id == group->bus_id, ESP_ERR_INVALID_ARG, TAG,
                            "peripheral and DMA system bus mismatch");
    }
    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
        if (trig_periph.instance_id >= 0) {
            portENTER_CRITICAL(&group->spinlock);
            if (group->tx_periph_in_use_mask & (1 << trig_periph.instance_id)) {
                periph_conflict = true;
            } else {
                group->tx_periph_in_use_mask |= (1 << trig_periph.instance_id);
            }
            portEXIT_CRITICAL(&group->spinlock);
        }
    } else {
        if (trig_periph.instance_id >= 0) {
            portENTER_CRITICAL(&group->spinlock);
            if (group->rx_periph_in_use_mask & (1 << trig_periph.instance_id)) {
                periph_conflict = true;
            } else {
                group->rx_periph_in_use_mask |= (1 << trig_periph.instance_id);
            }
            portEXIT_CRITICAL(&group->spinlock);
        }
    }
    ESP_RETURN_ON_FALSE(!periph_conflict, ESP_ERR_INVALID_STATE, TAG, "peripheral %d is already used by another channel", trig_periph.instance_id);
    gdma_hal_connect_peri(hal, pair->pair_id, dma_chan->direction, trig_periph.periph, trig_periph.instance_id);
    dma_chan->periph_id = trig_periph.instance_id;
    return ESP_OK;
}
esp_err_t gdma_disconnect(gdma_channel_handle_t dma_chan)
{
    ESP_RETURN_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE(dma_chan->periph_id != GDMA_INVALID_PERIPH_TRIG, ESP_ERR_INVALID_STATE, TAG, "no peripheral is connected to the channel");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    int save_periph_id = dma_chan->periph_id;
    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
        if (save_periph_id >= 0) {
            portENTER_CRITICAL(&group->spinlock);
            group->tx_periph_in_use_mask &= ~(1 << save_periph_id);
            portEXIT_CRITICAL(&group->spinlock);
        }
    } else {
        if (save_periph_id >= 0) {
            portENTER_CRITICAL(&group->spinlock);
            group->rx_periph_in_use_mask &= ~(1 << save_periph_id);
            portEXIT_CRITICAL(&group->spinlock);
        }
    }
    gdma_hal_disconnect_peri(hal, pair->pair_id, dma_chan->direction);
    dma_chan->periph_id = GDMA_INVALID_PERIPH_TRIG;
    return ESP_OK;
}
esp_err_t gdma_get_free_m2m_trig_id_mask(gdma_channel_handle_t dma_chan, uint32_t *mask)
{
    ESP_RETURN_ON_FALSE(dma_chan && mask, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    uint32_t free_mask = group->hal.priv_data->m2m_free_periph_mask;
    portENTER_CRITICAL(&group->spinlock);
    free_mask &= ~(group->tx_periph_in_use_mask);
    free_mask &= ~(group->rx_periph_in_use_mask);
    portEXIT_CRITICAL(&group->spinlock);
    *mask = free_mask;
    return ESP_OK;
}
esp_err_t gdma_set_transfer_ability(gdma_channel_handle_t dma_chan, const gdma_transfer_ability_t *ability)
{
    ESP_RETURN_ON_FALSE(dma_chan && ability, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    size_t sram_alignment = ability->sram_trans_align;
    size_t psram_alignment = ability->psram_trans_align;
    // alignment should be 2^n
    ESP_RETURN_ON_FALSE((sram_alignment & (sram_alignment - 1)) == 0, ESP_ERR_INVALID_ARG,
                        TAG, "invalid sram alignment: %zu", sram_alignment);
    uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_DATA);
    if (psram_alignment == 0) {
        // fall back to the data cache line size of the external memory (PSRAM)
        psram_alignment = data_cache_line_size;
    }
    if (psram_alignment > data_cache_line_size) {
        ESP_RETURN_ON_FALSE(((psram_alignment % data_cache_line_size) == 0), ESP_ERR_INVALID_ARG,
                            TAG, "psram_alignment(%zu) should be multiple of the data_cache_line_size(%"PRIu32")",
                            psram_alignment, data_cache_line_size);
    }
    // if the DMA can't access the PSRAM, this HAL function is a no-op
    gdma_hal_set_ext_mem_align(hal, pair->pair_id, dma_chan->direction, psram_alignment);
    // TX channel can always enable burst mode, no matter the data alignment
    bool en_burst = true;
    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
        // RX channel burst mode depends on the specific data alignment
        en_burst = sram_alignment >= 4;
    }
    gdma_hal_enable_burst(hal, pair->pair_id, dma_chan->direction, en_burst, en_burst);
    dma_chan->sram_alignment = sram_alignment;
    dma_chan->psram_alignment = psram_alignment;
    ESP_LOGD(TAG, "%s channel (%d,%d), (%zu:%zu) bytes aligned, burst %s", dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX ? "tx" : "rx",
             group->group_id, pair->pair_id, sram_alignment, psram_alignment, en_burst ? "enabled" : "disabled");
    return ESP_OK;
}
esp_err_t gdma_apply_strategy(gdma_channel_handle_t dma_chan, const gdma_strategy_config_t *config)
{
    ESP_RETURN_ON_FALSE(dma_chan && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    gdma_hal_set_strategy(hal, pair->pair_id, dma_chan->direction, config->owner_check, config->auto_update_desc);
    return ESP_OK;
}
esp_err_t gdma_set_priority(gdma_channel_handle_t dma_chan, uint32_t priority)
{
    ESP_RETURN_ON_FALSE(dma_chan && priority <= GDMA_LL_CHANNEL_MAX_PRIORITY, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    gdma_hal_set_priority(hal, pair->pair_id, dma_chan->direction, priority);
    return ESP_OK;
}
#if SOC_GDMA_SUPPORT_CRC
esp_err_t gdma_config_crc_calculator(gdma_channel_handle_t dma_chan, const gdma_crc_calculator_config_t *config)
{
    ESP_RETURN_ON_FALSE(dma_chan && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    switch (group->bus_id) {
#if SOC_AHB_GDMA_SUPPORTED
    case SOC_GDMA_BUS_AHB:
        ESP_RETURN_ON_FALSE(config->crc_bit_width <= GDMA_LL_AHB_MAX_CRC_BIT_WIDTH, ESP_ERR_INVALID_ARG, TAG, "invalid crc bit width");
        break;
#endif // SOC_AHB_GDMA_SUPPORTED
#if SOC_AXI_GDMA_SUPPORTED
    case SOC_GDMA_BUS_AXI:
        ESP_RETURN_ON_FALSE(config->crc_bit_width <= GDMA_LL_AXI_MAX_CRC_BIT_WIDTH, ESP_ERR_INVALID_ARG, TAG, "invalid crc bit width");
        break;
#endif // SOC_AXI_GDMA_SUPPORTED
    default:
        ESP_LOGE(TAG, "invalid bus id: %d", group->bus_id);
        return ESP_ERR_INVALID_ARG;
    }
    // clear the previous CRC result
    gdma_hal_clear_crc(hal, pair->pair_id, dma_chan->direction);
    // set polynomial and initial value
    gdma_hal_crc_config_t hal_config = {
        .crc_bit_width = config->crc_bit_width,
        .poly_hex = config->poly_hex,
        .init_value = config->init_value,
        .reverse_data_mask = config->reverse_data_mask,
    };
    gdma_hal_set_crc_poly(hal, pair->pair_id, dma_chan->direction, &hal_config);
    return ESP_OK;
}
esp_err_t gdma_crc_get_result(gdma_channel_handle_t dma_chan, uint32_t *result)
{
    ESP_RETURN_ON_FALSE(dma_chan && result, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    *result = gdma_hal_get_crc_result(hal, pair->pair_id, dma_chan->direction);
    return ESP_OK;
}
#endif // SOC_GDMA_SUPPORT_CRC
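/*
 * Illustrative CRC usage sketch (only meaningful on targets where SOC_GDMA_SUPPORT_CRC is
 * set): configure the calculator before starting the transfer, then read the result after
 * the transfer completes. `tx_chan` is a previously allocated channel handle (see the
 * sketch at the top of this file); the CRC-16/CCITT parameters below are an arbitrary
 * example, not values mandated by this driver.
 *
 *     gdma_crc_calculator_config_t crc_cfg = {
 *         .crc_bit_width = 16,
 *         .poly_hex = 0x1021,
 *         .init_value = 0xFFFF,
 *     };
 *     ESP_ERROR_CHECK(gdma_config_crc_calculator(tx_chan, &crc_cfg));
 *     // ... start the transfer and wait for it to finish ...
 *     uint32_t crc = 0;
 *     ESP_ERROR_CHECK(gdma_crc_get_result(tx_chan, &crc));
 */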
esp_err_t gdma_register_tx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_tx_event_callbacks_t *cbs, void *user_data)
{
    ESP_RETURN_ON_FALSE(dma_chan && cbs && dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    gdma_tx_channel_t *tx_chan = __containerof(dma_chan, gdma_tx_channel_t, base);
#if CONFIG_GDMA_ISR_IRAM_SAFE
    if (cbs->on_trans_eof) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_trans_eof), ESP_ERR_INVALID_ARG,
                            TAG, "on_trans_eof not in IRAM");
    }
    if (cbs->on_descr_err) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_descr_err), ESP_ERR_INVALID_ARG,
                            TAG, "on_descr_err not in IRAM");
    }
    if (user_data) {
        ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG,
                            TAG, "user context not in internal RAM");
    }
#endif // CONFIG_GDMA_ISR_IRAM_SAFE
    // lazy install interrupt service
    ESP_RETURN_ON_ERROR(gdma_install_tx_interrupt(tx_chan), TAG, "install interrupt service failed");
    // enable/disable GDMA interrupt events for TX channel
    portENTER_CRITICAL(&pair->spinlock);
    gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_TX, GDMA_LL_EVENT_TX_EOF, cbs->on_trans_eof != NULL);
    gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_TX, GDMA_LL_EVENT_TX_DESC_ERROR, cbs->on_descr_err != NULL);
    portEXIT_CRITICAL(&pair->spinlock);
    memcpy(&tx_chan->cbs, cbs, sizeof(gdma_tx_event_callbacks_t));
    tx_chan->user_data = user_data;
    ESP_RETURN_ON_ERROR(esp_intr_enable(dma_chan->intr), TAG, "enable interrupt failed");
    return ESP_OK;
}
esp_err_t gdma_register_rx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_rx_event_callbacks_t *cbs, void *user_data)
{
    ESP_RETURN_ON_FALSE(dma_chan && cbs && dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    gdma_rx_channel_t *rx_chan = __containerof(dma_chan, gdma_rx_channel_t, base);
#if CONFIG_GDMA_ISR_IRAM_SAFE
    if (cbs->on_recv_eof) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_eof), ESP_ERR_INVALID_ARG,
                            TAG, "on_recv_eof not in IRAM");
    }
    if (cbs->on_descr_err) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_descr_err), ESP_ERR_INVALID_ARG,
                            TAG, "on_descr_err not in IRAM");
    }
    if (cbs->on_recv_done) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_done), ESP_ERR_INVALID_ARG,
                            TAG, "on_recv_done not in IRAM");
    }
    if (user_data) {
        ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG,
                            TAG, "user context not in internal RAM");
    }
#endif // CONFIG_GDMA_ISR_IRAM_SAFE
    // lazy install interrupt service
    ESP_RETURN_ON_ERROR(gdma_install_rx_interrupt(rx_chan), TAG, "install interrupt service failed");
    // enable/disable GDMA interrupt events for RX channel
    portENTER_CRITICAL(&pair->spinlock);
    gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, GDMA_LL_EVENT_RX_SUC_EOF, cbs->on_recv_eof != NULL);
    gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, GDMA_LL_EVENT_RX_DESC_ERROR, cbs->on_descr_err != NULL);
    gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, GDMA_LL_EVENT_RX_DONE, cbs->on_recv_done != NULL);
    portEXIT_CRITICAL(&pair->spinlock);
    memcpy(&rx_chan->cbs, cbs, sizeof(gdma_rx_event_callbacks_t));
    rx_chan->user_data = user_data;
    ESP_RETURN_ON_ERROR(esp_intr_enable(dma_chan->intr), TAG, "enable interrupt failed");
    return ESP_OK;
}
esp_err_t gdma_start(gdma_channel_handle_t dma_chan, intptr_t desc_base_addr)
{
    ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE_ISR(dma_chan->flags.start_stop_by_etm == false, ESP_ERR_INVALID_STATE, TAG, "channel is controlled by ETM");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
    gdma_hal_start_with_desc(hal, pair->pair_id, dma_chan->direction, desc_base_addr);
    portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);
    return ESP_OK;
}
esp_err_t gdma_stop(gdma_channel_handle_t dma_chan)
{
    ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE_ISR(dma_chan->flags.start_stop_by_etm == false, ESP_ERR_INVALID_STATE, TAG, "channel is controlled by ETM");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
    gdma_hal_stop(hal, pair->pair_id, dma_chan->direction);
    portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);
    return ESP_OK;
}
esp_err_t gdma_append(gdma_channel_handle_t dma_chan)
{
    ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
    gdma_hal_append(hal, pair->pair_id, dma_chan->direction);
    portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);
    return ESP_OK;
}
esp_err_t gdma_reset(gdma_channel_handle_t dma_chan)
{
    ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    gdma_pair_t *pair = dma_chan->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
    gdma_hal_reset(hal, pair->pair_id, dma_chan->direction);
    portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);
    return ESP_OK;
}
static void gdma_release_group_handle(gdma_group_t *group)
{
    int group_id = group->group_id;
    bool do_deinitialize = false;
    portENTER_CRITICAL(&s_platform.spinlock);
    s_platform.group_ref_counts[group_id]--;
    if (s_platform.group_ref_counts[group_id] == 0) {
        assert(s_platform.groups[group_id]);
        do_deinitialize = true;
        // deregister from the platform
        s_platform.groups[group_id] = NULL;
    }
    portEXIT_CRITICAL(&s_platform.spinlock);
    if (do_deinitialize) {
        gdma_hal_deinit(&group->hal);
        GDMA_RCC_ATOMIC() {
            gdma_ll_enable_bus_clock(group_id, false);
        }
        free(group);
        ESP_LOGD(TAG, "del group %d", group_id);
    }
}
static gdma_group_t *gdma_acquire_group_handle(int group_id, void (*hal_init)(gdma_hal_context_t *hal, const gdma_hal_config_t *config))
{
    bool new_group = false;
    gdma_group_t *group = NULL;
    gdma_group_t *pre_alloc_group = heap_caps_calloc(1, sizeof(gdma_group_t), GDMA_MEM_ALLOC_CAPS);
    if (!pre_alloc_group) {
        goto out;
    }
    portENTER_CRITICAL(&s_platform.spinlock);
    if (!s_platform.groups[group_id]) {
        new_group = true;
        group = pre_alloc_group;
        s_platform.groups[group_id] = group; // register to platform
    } else {
        group = s_platform.groups[group_id];
    }
    // acquiring the group handle means a new object now refers to this group, so increase the reference count
    s_platform.group_ref_counts[group_id]++;
    portEXIT_CRITICAL(&s_platform.spinlock);
    if (new_group) {
        group->group_id = group_id;
        group->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
        // enable APB access to the GDMA registers
        GDMA_RCC_ATOMIC() {
            gdma_ll_enable_bus_clock(group_id, true);
            gdma_ll_reset_register(group_id);
        }
        gdma_hal_config_t config = {
            .group_id = group_id,
        };
        hal_init(&group->hal, &config);
        ESP_LOGD(TAG, "new group (%d) at %p", group_id, group);
    } else {
        free(pre_alloc_group);
    }
out:
    return group;
}
static void gdma_release_pair_handle(gdma_pair_t *pair)
{
    gdma_group_t *group = pair->group;
    int pair_id = pair->pair_id;
    bool do_deinitialize = false;
    portENTER_CRITICAL(&group->spinlock);
    group->pair_ref_counts[pair_id]--;
    if (group->pair_ref_counts[pair_id] == 0) {
        assert(group->pairs[pair_id]);
        do_deinitialize = true;
        group->pairs[pair_id] = NULL; // deregister the pair from the group
    }
    portEXIT_CRITICAL(&group->spinlock);
    if (do_deinitialize) {
        free(pair);
        ESP_LOGD(TAG, "del pair (%d,%d)", group->group_id, pair_id);
        gdma_release_group_handle(group);
    }
}
static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id)
{
    bool new_pair = false;
    gdma_pair_t *pair = NULL;
    gdma_pair_t *pre_alloc_pair = heap_caps_calloc(1, sizeof(gdma_pair_t), GDMA_MEM_ALLOC_CAPS);
    if (!pre_alloc_pair) {
        goto out;
    }
    portENTER_CRITICAL(&group->spinlock);
    if (!group->pairs[pair_id]) {
        new_pair = true;
        pair = pre_alloc_pair;
        // register the pair to the group
        group->pairs[pair_id] = pair;
    } else {
        pair = group->pairs[pair_id];
    }
    // acquiring the pair handle means a new object now refers to this pair, so increase the reference count
    group->pair_ref_counts[pair_id]++;
    portEXIT_CRITICAL(&group->spinlock);
    if (new_pair) {
        pair->group = group;
        pair->pair_id = pair_id;
        pair->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
        portENTER_CRITICAL(&s_platform.spinlock);
        // pair obtains a reference to group, so increase it
        s_platform.group_ref_counts[group->group_id]++;
        portEXIT_CRITICAL(&s_platform.spinlock);
        ESP_LOGD(TAG, "new pair (%d,%d) at %p", group->group_id, pair_id, pair);
    } else {
        free(pre_alloc_pair);
    }
out:
    return pair;
}
static esp_err_t gdma_del_tx_channel(gdma_channel_t *dma_channel)
{
    gdma_pair_t *pair = dma_channel->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    int pair_id = pair->pair_id;
    int group_id = group->group_id;
    gdma_tx_channel_t *tx_chan = __containerof(dma_channel, gdma_tx_channel_t, base);
    portENTER_CRITICAL(&pair->spinlock);
    pair->tx_chan = NULL;
    pair->occupy_code &= ~SEARCH_REQUEST_TX_CHANNEL;
    portEXIT_CRITICAL(&pair->spinlock);
    if (dma_channel->intr) {
        esp_intr_free(dma_channel->intr);
        portENTER_CRITICAL(&pair->spinlock);
        gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX, false); // disable all interrupt events
        gdma_hal_clear_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX);   // clear all pending events
        portEXIT_CRITICAL(&pair->spinlock);
        ESP_LOGD(TAG, "uninstall interrupt service for tx channel (%d,%d)", group_id, pair_id);
    }
    free(tx_chan);
    ESP_LOGD(TAG, "del tx channel (%d,%d)", group_id, pair_id);
    // channel has a reference on pair, release it now
    gdma_release_pair_handle(pair);
    return ESP_OK;
}
static esp_err_t gdma_del_rx_channel(gdma_channel_t *dma_channel)
{
    gdma_pair_t *pair = dma_channel->pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    int pair_id = pair->pair_id;
    int group_id = group->group_id;
    gdma_rx_channel_t *rx_chan = __containerof(dma_channel, gdma_rx_channel_t, base);
    portENTER_CRITICAL(&pair->spinlock);
    pair->rx_chan = NULL;
    pair->occupy_code &= ~SEARCH_REQUEST_RX_CHANNEL;
    portEXIT_CRITICAL(&pair->spinlock);
    if (dma_channel->intr) {
        esp_intr_free(dma_channel->intr);
        portENTER_CRITICAL(&pair->spinlock);
        gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX, false); // disable all interrupt events
        gdma_hal_clear_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX);   // clear all pending events
        portEXIT_CRITICAL(&pair->spinlock);
        ESP_LOGD(TAG, "uninstall interrupt service for rx channel (%d,%d)", group_id, pair_id);
    }
    free(rx_chan);
    ESP_LOGD(TAG, "del rx channel (%d,%d)", group_id, pair_id);
    gdma_release_pair_handle(pair);
    return ESP_OK;
}
void gdma_default_rx_isr(void *args)
{
    gdma_rx_channel_t *rx_chan = (gdma_rx_channel_t *)args;
    gdma_pair_t *pair = rx_chan->base.pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    int pair_id = pair->pair_id;
    bool need_yield = false;
    // clear pending interrupt event
    uint32_t intr_status = gdma_hal_read_intr_status(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX);
    gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, intr_status);
    /* Call on_recv_done before the EOF callbacks to ensure a correct sequence */
    if ((intr_status & GDMA_LL_EVENT_RX_DONE) && rx_chan->cbs.on_recv_done) {
        /* No event data is returned in this callback, because we can't get a deterministic
         * address of the descriptor that the DMA controller has just finished processing.
         * When the `rx_done` interrupt triggers, the finished descriptor should ideally be
         * stored in the `in_desc_bf1` register; however, by the time software reads
         * `in_desc_bf1`, it may already have been refreshed. Therefore, instead of returning
         * an unreliable descriptor, we choose to return nothing.
         */
        need_yield |= rx_chan->cbs.on_recv_done(&rx_chan->base, NULL, rx_chan->user_data);
    }
    if ((intr_status & GDMA_LL_EVENT_RX_DESC_ERROR) && rx_chan->cbs.on_descr_err) {
        need_yield |= rx_chan->cbs.on_descr_err(&rx_chan->base, NULL, rx_chan->user_data);
    }
    if ((intr_status & GDMA_LL_EVENT_RX_SUC_EOF) && rx_chan->cbs.on_recv_eof) {
        uint32_t eof_addr = gdma_hal_get_eof_desc_addr(&group->hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, true);
        gdma_event_data_t suc_eof_data = {
            .rx_eof_desc_addr = eof_addr,
        };
        need_yield |= rx_chan->cbs.on_recv_eof(&rx_chan->base, &suc_eof_data, rx_chan->user_data);
    }
    if ((intr_status & GDMA_LL_EVENT_RX_ERR_EOF) && rx_chan->cbs.on_recv_eof) {
        uint32_t eof_addr = gdma_hal_get_eof_desc_addr(&group->hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, false);
        gdma_event_data_t err_eof_data = {
            .rx_eof_desc_addr = eof_addr,
            .flags.abnormal_eof = true,
        };
        need_yield |= rx_chan->cbs.on_recv_eof(&rx_chan->base, &err_eof_data, rx_chan->user_data);
    }
    if (need_yield) {
        portYIELD_FROM_ISR();
    }
}
void gdma_default_tx_isr(void *args)
{
    gdma_tx_channel_t *tx_chan = (gdma_tx_channel_t *)args;
    gdma_pair_t *pair = tx_chan->base.pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    int pair_id = pair->pair_id;
    bool need_yield = false;
    // clear pending interrupt event
    uint32_t intr_status = gdma_hal_read_intr_status(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX);
    gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, intr_status);
    if ((intr_status & GDMA_LL_EVENT_TX_EOF) && tx_chan->cbs.on_trans_eof) {
        uint32_t eof_addr = gdma_hal_get_eof_desc_addr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, true);
        gdma_event_data_t edata = {
            .tx_eof_desc_addr = eof_addr,
        };
        need_yield |= tx_chan->cbs.on_trans_eof(&tx_chan->base, &edata, tx_chan->user_data);
    }
    if ((intr_status & GDMA_LL_EVENT_TX_DESC_ERROR) && tx_chan->cbs.on_descr_err) {
        need_yield |= tx_chan->cbs.on_descr_err(&tx_chan->base, NULL, tx_chan->user_data);
    }
    if (need_yield) {
        portYIELD_FROM_ISR();
    }
}
static esp_err_t gdma_install_rx_interrupt(gdma_rx_channel_t *rx_chan)
{
    esp_err_t ret = ESP_OK;
    gdma_pair_t *pair = rx_chan->base.pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    int pair_id = pair->pair_id;
    // pre-allocate an interrupt handle, with the handler disabled
    int isr_flags = GDMA_INTR_ALLOC_FLAGS;
#if GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT
    isr_flags |= ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_LOWMED;
#endif
    intr_handle_t intr = NULL;
    ret = esp_intr_alloc_intrstatus(gdma_periph_signals.groups[group->group_id].pairs[pair_id].rx_irq_id, isr_flags,
                                    gdma_hal_get_intr_status_reg(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX), GDMA_LL_RX_EVENT_MASK,
                                    gdma_default_rx_isr, rx_chan, &intr);
    ESP_GOTO_ON_ERROR(ret, err, TAG, "alloc interrupt failed");
    rx_chan->base.intr = intr;
    portENTER_CRITICAL(&pair->spinlock);
    gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX, false); // disable all interrupt events
    gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX);         // clear all pending events
    portEXIT_CRITICAL(&pair->spinlock);
    ESP_LOGD(TAG, "install interrupt service for rx channel (%d,%d)", group->group_id, pair_id);
err:
    return ret;
}
static esp_err_t gdma_install_tx_interrupt(gdma_tx_channel_t *tx_chan)
{
    esp_err_t ret = ESP_OK;
    gdma_pair_t *pair = tx_chan->base.pair;
    gdma_group_t *group = pair->group;
    gdma_hal_context_t *hal = &group->hal;
    int pair_id = pair->pair_id;
    // pre-allocate an interrupt handle, with the handler disabled
    int isr_flags = GDMA_INTR_ALLOC_FLAGS;
#if GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT
    isr_flags |= ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_LOWMED;
#endif
    intr_handle_t intr = NULL;
    ret = esp_intr_alloc_intrstatus(gdma_periph_signals.groups[group->group_id].pairs[pair_id].tx_irq_id, isr_flags,
                                    gdma_hal_get_intr_status_reg(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX), GDMA_LL_TX_EVENT_MASK,
                                    gdma_default_tx_isr, tx_chan, &intr);
    ESP_GOTO_ON_ERROR(ret, err, TAG, "alloc interrupt failed");
    tx_chan->base.intr = intr;
    portENTER_CRITICAL(&pair->spinlock);
    gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX, false); // disable all interrupt events
    gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX);         // clear all pending events
    portEXIT_CRITICAL(&pair->spinlock);
    ESP_LOGD(TAG, "install interrupt service for tx channel (%d,%d)", group->group_id, pair_id);
err:
    return ret;
}