// rmt_tx.c
  1. /*
  2. * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <stdlib.h>
  7. #include <string.h>
  8. #include <sys/cdefs.h>
  9. #include <sys/param.h>
  10. #include "sdkconfig.h"
  11. #if CONFIG_RMT_ENABLE_DEBUG_LOG
  12. // The local log level must be defined before including esp_log.h
  13. // Set the maximum log level for this source file
  14. #define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
  15. #endif
  16. #include "esp_log.h"
  17. #include "esp_check.h"
  18. #include "esp_rom_gpio.h"
  19. #include "soc/rmt_periph.h"
  20. #include "soc/rtc.h"
  21. #include "hal/rmt_ll.h"
  22. #include "hal/gpio_hal.h"
  23. #include "driver/gpio.h"
  24. #include "driver/rmt_tx.h"
  25. #include "rmt_private.h"
  26. #include "esp_memory_utils.h"
  27. static const char *TAG = "rmt";
// Bookkeeping for a TX sync manager: a set of TX channels in one group that
// are started simultaneously by the hardware sync feature.
struct rmt_sync_manager_t {
    rmt_group_t *group;                      // which group the synchro belongs to (holds one group reference)
    uint32_t channel_mask;                   // Mask of channels that are managed
    size_t array_size;                       // Size of the `tx_channel_array`
    rmt_channel_handle_t tx_channel_array[]; // Array of TX channels that are managed (flexible array member)
};
  34. static esp_err_t rmt_del_tx_channel(rmt_channel_handle_t channel);
  35. static esp_err_t rmt_tx_modulate_carrier(rmt_channel_handle_t channel, const rmt_carrier_config_t *config);
  36. static esp_err_t rmt_tx_enable(rmt_channel_handle_t channel);
  37. static esp_err_t rmt_tx_disable(rmt_channel_handle_t channel);
  38. static void rmt_tx_default_isr(void *args);
  39. static void rmt_tx_do_transaction(rmt_tx_channel_t *tx_chan, rmt_tx_trans_desc_t *t);
  40. #if SOC_RMT_SUPPORT_DMA
  41. static bool rmt_dma_tx_eof_cb(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data);
  42. static esp_err_t rmt_tx_init_dma_link(rmt_tx_channel_t *tx_channel, const rmt_tx_channel_config_t *config)
  43. {
  44. rmt_symbol_word_t *dma_mem_base = heap_caps_calloc(1, sizeof(rmt_symbol_word_t) * config->mem_block_symbols,
  45. RMT_MEM_ALLOC_CAPS | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
  46. ESP_RETURN_ON_FALSE(dma_mem_base, ESP_ERR_NO_MEM, TAG, "no mem for tx DMA buffer");
  47. tx_channel->base.dma_mem_base = dma_mem_base;
  48. for (int i = 0; i < RMT_DMA_NODES_PING_PONG; i++) {
  49. // each descriptor shares half of the DMA buffer
  50. tx_channel->dma_nodes_nc[i].buffer = dma_mem_base + tx_channel->ping_pong_symbols * i;
  51. tx_channel->dma_nodes_nc[i].dw0.size = tx_channel->ping_pong_symbols * sizeof(rmt_symbol_word_t);
  52. // the ownership will be switched to DMA in `rmt_tx_do_transaction()`
  53. tx_channel->dma_nodes_nc[i].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
  54. // each node can generate the DMA eof interrupt, and the driver will do a ping-pong trick in the eof callback
  55. tx_channel->dma_nodes_nc[i].dw0.suc_eof = 1;
  56. }
  57. gdma_channel_alloc_config_t dma_chan_config = {
  58. .direction = GDMA_CHANNEL_DIRECTION_TX,
  59. };
  60. ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_chan_config, &tx_channel->base.dma_chan), TAG, "allocate TX DMA channel failed");
  61. gdma_tx_event_callbacks_t cbs = {
  62. .on_trans_eof = rmt_dma_tx_eof_cb,
  63. };
  64. gdma_register_tx_event_callbacks(tx_channel->base.dma_chan, &cbs, tx_channel);
  65. return ESP_OK;
  66. }
  67. #endif // SOC_RMT_SUPPORT_DMA
// Find a contiguous run of free channels/memory blocks in some RMT group and
// claim it for `tx_channel`. On success the channel's group reference, id and
// block mask are filled in; on failure no group reference is retained.
static esp_err_t rmt_tx_register_to_group(rmt_tx_channel_t *tx_channel, const rmt_tx_channel_config_t *config)
{
    size_t mem_block_num = 0;
    // start to search for a free channel
    // a channel can take up its neighbour's memory block, so the neighbour channel won't work, we should skip these "invaded" ones
    int channel_scan_start = RMT_TX_CHANNEL_OFFSET_IN_GROUP;
    int channel_scan_end = RMT_TX_CHANNEL_OFFSET_IN_GROUP + SOC_RMT_TX_CANDIDATES_PER_GROUP;
    if (config->flags.with_dma) {
        // for DMA mode, the memory block number is always 1; for non-DMA mode, memory block number is configured by user
        mem_block_num = 1;
        // Only the last channel has the DMA capability
        channel_scan_start = RMT_TX_CHANNEL_OFFSET_IN_GROUP + SOC_RMT_TX_CANDIDATES_PER_GROUP - 1;
        // in DMA mode the staging buffer is in RAM: half of it per ping-pong node
        tx_channel->ping_pong_symbols = config->mem_block_symbols / 2;
    } else {
        // one channel can occupy multiple memory blocks; round up so the blocks cover mem_block_symbols
        mem_block_num = config->mem_block_symbols / SOC_RMT_MEM_WORDS_PER_CHANNEL;
        if (mem_block_num * SOC_RMT_MEM_WORDS_PER_CHANNEL < config->mem_block_symbols) {
            mem_block_num++;
        }
        tx_channel->ping_pong_symbols = mem_block_num * SOC_RMT_MEM_WORDS_PER_CHANNEL / 2;
    }
    tx_channel->base.mem_block_num = mem_block_num;
    // search free channel and then register to the group
    // memory blocks used by one channel must be continuous
    // `channel_mask` has `mem_block_num` consecutive bits set; shifted to each candidate slot below
    uint32_t channel_mask = (1 << mem_block_num) - 1;
    rmt_group_t *group = NULL;
    int channel_id = -1;
    for (int i = 0; i < SOC_RMT_GROUPS; i++) {
        group = rmt_acquire_group_handle(i);
        ESP_RETURN_ON_FALSE(group, ESP_ERR_NO_MEM, TAG, "no mem for group (%d)", i);
        portENTER_CRITICAL(&group->spinlock);
        for (int j = channel_scan_start; j < channel_scan_end; j++) {
            // claim the first position whose whole block range is free
            if (!(group->occupy_mask & (channel_mask << j))) {
                group->occupy_mask |= (channel_mask << j);
                // the channel ID should index from 0
                channel_id = j - RMT_TX_CHANNEL_OFFSET_IN_GROUP;
                group->tx_channels[channel_id] = tx_channel;
                break;
            }
        }
        portEXIT_CRITICAL(&group->spinlock);
        if (channel_id < 0) {
            // didn't find a capable channel in the group, don't forget to release the group handle
            rmt_release_group_handle(group);
        } else {
            // success: keep the group reference alive for the channel's lifetime
            tx_channel->base.channel_id = channel_id;
            tx_channel->base.channel_mask = channel_mask;
            tx_channel->base.group = group;
            break;
        }
    }
    ESP_RETURN_ON_FALSE(channel_id >= 0, ESP_ERR_NOT_FOUND, TAG, "no free tx channels");
    return ESP_OK;
}
  122. static void rmt_tx_unregister_from_group(rmt_channel_t *channel, rmt_group_t *group)
  123. {
  124. portENTER_CRITICAL(&group->spinlock);
  125. group->tx_channels[channel->channel_id] = NULL;
  126. group->occupy_mask &= ~(channel->channel_mask << (channel->channel_id + RMT_TX_CHANNEL_OFFSET_IN_GROUP));
  127. portEXIT_CRITICAL(&group->spinlock);
  128. // channel has a reference on group, release it now
  129. rmt_release_group_handle(group);
  130. }
  131. static esp_err_t rmt_tx_create_trans_queue(rmt_tx_channel_t *tx_channel, const rmt_tx_channel_config_t *config)
  132. {
  133. esp_err_t ret;
  134. tx_channel->queue_size = config->trans_queue_depth;
  135. // Allocate transaction queues. Each queue only holds pointers to the transaction descriptors
  136. for (int i = 0; i < RMT_TX_QUEUE_MAX; i++) {
  137. tx_channel->trans_queues[i] = xQueueCreateWithCaps(config->trans_queue_depth, sizeof(rmt_tx_trans_desc_t *), RMT_MEM_ALLOC_CAPS);
  138. ESP_GOTO_ON_FALSE(tx_channel->trans_queues[i], ESP_ERR_NO_MEM, exit, TAG, "no mem for queues");
  139. }
  140. // Initialize the ready queue
  141. rmt_tx_trans_desc_t *p_trans_desc = NULL;
  142. for (int i = 0; i < config->trans_queue_depth; i++) {
  143. p_trans_desc = &tx_channel->trans_desc_pool[i];
  144. ESP_GOTO_ON_FALSE(xQueueSend(tx_channel->trans_queues[RMT_TX_QUEUE_READY], &p_trans_desc, 0) == pdTRUE,
  145. ESP_ERR_INVALID_STATE, exit, TAG, "ready queue full");
  146. }
  147. return ESP_OK;
  148. exit:
  149. for (int i = 0; i < RMT_TX_QUEUE_MAX; i++) {
  150. if (tx_channel->trans_queues[i]) {
  151. vQueueDeleteWithCaps(tx_channel->trans_queues[i]);
  152. tx_channel->trans_queues[i] = NULL;
  153. }
  154. }
  155. return ret;
  156. }
// Release every resource owned by a TX channel, then the channel struct itself.
// Tolerates partially-constructed channels (each member is checked before
// release), which is why `rmt_new_tx_channel()` reuses it on its error path.
static esp_err_t rmt_tx_destroy(rmt_tx_channel_t *tx_channel)
{
    // detach the ISR first so no callback can fire during teardown
    if (tx_channel->base.intr) {
        ESP_RETURN_ON_ERROR(esp_intr_free(tx_channel->base.intr), TAG, "delete interrupt service failed");
    }
    if (tx_channel->base.pm_lock) {
        ESP_RETURN_ON_ERROR(esp_pm_lock_delete(tx_channel->base.pm_lock), TAG, "delete pm_lock failed");
    }
#if SOC_RMT_SUPPORT_DMA
    if (tx_channel->base.dma_chan) {
        ESP_RETURN_ON_ERROR(gdma_del_channel(tx_channel->base.dma_chan), TAG, "delete dma channel failed");
    }
#endif // SOC_RMT_SUPPORT_DMA
    for (int i = 0; i < RMT_TX_QUEUE_MAX; i++) {
        if (tx_channel->trans_queues[i]) {
            vQueueDeleteWithCaps(tx_channel->trans_queues[i]);
        }
    }
    if (tx_channel->base.dma_mem_base) {
        free(tx_channel->base.dma_mem_base);
    }
    if (tx_channel->base.group) {
        // de-register channel from RMT group (this also drops the group reference)
        rmt_tx_unregister_from_group(&tx_channel->base, tx_channel->base.group);
    }
    if (tx_channel->dma_nodes) {
        free(tx_channel->dma_nodes);
    }
    // the channel struct itself goes last — everything above lives inside it
    free(tx_channel);
    return ESP_OK;
}
  188. esp_err_t rmt_new_tx_channel(const rmt_tx_channel_config_t *config, rmt_channel_handle_t *ret_chan)
  189. {
  190. #if CONFIG_RMT_ENABLE_DEBUG_LOG
  191. esp_log_level_set(TAG, ESP_LOG_DEBUG);
  192. #endif
  193. esp_err_t ret = ESP_OK;
  194. rmt_tx_channel_t *tx_channel = NULL;
  195. // Check if priority is valid
  196. if (config->intr_priority) {
  197. ESP_RETURN_ON_FALSE((config->intr_priority) > 0, ESP_ERR_INVALID_ARG, TAG, "invalid interrupt priority:%d", config->intr_priority);
  198. ESP_RETURN_ON_FALSE(1 << (config->intr_priority) & RMT_ALLOW_INTR_PRIORITY_MASK, ESP_ERR_INVALID_ARG, TAG, "invalid interrupt priority:%d", config->intr_priority);
  199. }
  200. ESP_GOTO_ON_FALSE(config && ret_chan && config->resolution_hz && config->trans_queue_depth, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
  201. ESP_GOTO_ON_FALSE(GPIO_IS_VALID_GPIO(config->gpio_num), ESP_ERR_INVALID_ARG, err, TAG, "invalid GPIO number");
  202. ESP_GOTO_ON_FALSE((config->mem_block_symbols & 0x01) == 0 && config->mem_block_symbols >= SOC_RMT_MEM_WORDS_PER_CHANNEL,
  203. ESP_ERR_INVALID_ARG, err, TAG, "mem_block_symbols must be even and at least %d", SOC_RMT_MEM_WORDS_PER_CHANNEL);
  204. #if SOC_RMT_SUPPORT_DMA
  205. // we only support 2 nodes ping-pong, if the configured memory block size needs more than two DMA descriptors, should treat it as invalid
  206. ESP_GOTO_ON_FALSE(config->mem_block_symbols <= RMT_DMA_DESC_BUF_MAX_SIZE * RMT_DMA_NODES_PING_PONG / sizeof(rmt_symbol_word_t),
  207. ESP_ERR_INVALID_ARG, err, TAG, "mem_block_symbols can't exceed %d",
  208. RMT_DMA_DESC_BUF_MAX_SIZE * RMT_DMA_NODES_PING_PONG / sizeof(rmt_symbol_word_t));
  209. #else
  210. ESP_GOTO_ON_FALSE(config->flags.with_dma == 0, ESP_ERR_NOT_SUPPORTED, err, TAG, "DMA not supported");
  211. #endif
  212. // malloc channel memory
  213. uint32_t mem_caps = RMT_MEM_ALLOC_CAPS;
  214. tx_channel = heap_caps_calloc(1, sizeof(rmt_tx_channel_t) + sizeof(rmt_tx_trans_desc_t) * config->trans_queue_depth, mem_caps);
  215. ESP_GOTO_ON_FALSE(tx_channel, ESP_ERR_NO_MEM, err, TAG, "no mem for tx channel");
  216. // create DMA descriptors
  217. if (config->flags.with_dma) {
  218. // DMA descriptors must be placed in internal SRAM
  219. mem_caps |= MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA;
  220. tx_channel->dma_nodes = heap_caps_aligned_calloc(RMT_DMA_DESC_ALIGN, RMT_DMA_NODES_PING_PONG, sizeof(rmt_dma_descriptor_t), mem_caps);
  221. ESP_GOTO_ON_FALSE(tx_channel->dma_nodes, ESP_ERR_NO_MEM, err, TAG, "no mem for tx DMA nodes");
  222. // we will use the non-cached address to manipulate the DMA descriptor, for simplicity
  223. tx_channel->dma_nodes_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(tx_channel->dma_nodes);
  224. }
  225. // create transaction queues
  226. ESP_GOTO_ON_ERROR(rmt_tx_create_trans_queue(tx_channel, config), err, TAG, "install trans queues failed");
  227. // register the channel to group
  228. ESP_GOTO_ON_ERROR(rmt_tx_register_to_group(tx_channel, config), err, TAG, "register channel failed");
  229. rmt_group_t *group = tx_channel->base.group;
  230. rmt_hal_context_t *hal = &group->hal;
  231. int channel_id = tx_channel->base.channel_id;
  232. int group_id = group->group_id;
  233. // reset channel, make sure the TX engine is not working, and events are cleared
  234. portENTER_CRITICAL(&group->spinlock);
  235. rmt_hal_tx_channel_reset(&group->hal, channel_id);
  236. portEXIT_CRITICAL(&group->spinlock);
  237. // install tx interrupt
  238. // --- install interrupt service
  239. // interrupt is mandatory to run basic RMT transactions, so it's not lazy installed in `rmt_tx_register_event_callbacks()`
  240. // 1-- Set user specified priority to `group->intr_priority`
  241. bool priority_conflict = rmt_set_intr_priority_to_group(group, config->intr_priority);
  242. ESP_GOTO_ON_FALSE(!priority_conflict, ESP_ERR_INVALID_ARG, err, TAG, "intr_priority conflict");
  243. // 2-- Get interrupt allocation flag
  244. int isr_flags = rmt_get_isr_flags(group);
  245. // 3-- Allocate interrupt using isr_flag
  246. ret = esp_intr_alloc_intrstatus(rmt_periph_signals.groups[group_id].irq, isr_flags,
  247. (uint32_t) rmt_ll_get_interrupt_status_reg(hal->regs),
  248. RMT_LL_EVENT_TX_MASK(channel_id), rmt_tx_default_isr, tx_channel,
  249. &tx_channel->base.intr);
  250. ESP_GOTO_ON_ERROR(ret, err, TAG, "install tx interrupt failed");
  251. // install DMA service
  252. #if SOC_RMT_SUPPORT_DMA
  253. if (config->flags.with_dma) {
  254. ESP_GOTO_ON_ERROR(rmt_tx_init_dma_link(tx_channel, config), err, TAG, "install tx DMA failed");
  255. }
  256. #endif
  257. // select the clock source
  258. ESP_GOTO_ON_ERROR(rmt_select_periph_clock(&tx_channel->base, config->clk_src), err, TAG, "set group clock failed");
  259. // set channel clock resolution
  260. uint32_t real_div = group->resolution_hz / config->resolution_hz;
  261. rmt_ll_tx_set_channel_clock_div(hal->regs, channel_id, real_div);
  262. // resolution lost due to division, calculate the real resolution
  263. tx_channel->base.resolution_hz = group->resolution_hz / real_div;
  264. if (tx_channel->base.resolution_hz != config->resolution_hz) {
  265. ESP_LOGW(TAG, "channel resolution loss, real=%"PRIu32, tx_channel->base.resolution_hz);
  266. }
  267. rmt_ll_tx_set_mem_blocks(hal->regs, channel_id, tx_channel->base.mem_block_num);
  268. // set limit threshold, after transmit ping_pong_symbols size, an interrupt event would be generated
  269. rmt_ll_tx_set_limit(hal->regs, channel_id, tx_channel->ping_pong_symbols);
  270. // disable carrier modulation by default, can reenable by `rmt_apply_carrier()`
  271. rmt_ll_tx_enable_carrier_modulation(hal->regs, channel_id, false);
  272. // idle level is determined by register value
  273. rmt_ll_tx_fix_idle_level(hal->regs, channel_id, 0, true);
  274. // always enable tx wrap, both DMA mode and ping-pong mode rely this feature
  275. rmt_ll_tx_enable_wrap(hal->regs, channel_id, true);
  276. // GPIO Matrix/MUX configuration
  277. tx_channel->base.gpio_num = config->gpio_num;
  278. gpio_config_t gpio_conf = {
  279. .intr_type = GPIO_INTR_DISABLE,
  280. // also enable the input path if `io_loop_back` is on, this is useful for bi-directional buses
  281. .mode = (config->flags.io_od_mode ? GPIO_MODE_OUTPUT_OD : GPIO_MODE_OUTPUT) | (config->flags.io_loop_back ? GPIO_MODE_INPUT : 0),
  282. .pull_down_en = false,
  283. .pull_up_en = true,
  284. .pin_bit_mask = 1ULL << config->gpio_num,
  285. };
  286. ESP_GOTO_ON_ERROR(gpio_config(&gpio_conf), err, TAG, "config GPIO failed");
  287. esp_rom_gpio_connect_out_signal(config->gpio_num,
  288. rmt_periph_signals.groups[group_id].channels[channel_id + RMT_TX_CHANNEL_OFFSET_IN_GROUP].tx_sig,
  289. config->flags.invert_out, false);
  290. gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_num], PIN_FUNC_GPIO);
  291. portMUX_INITIALIZE(&tx_channel->base.spinlock);
  292. atomic_init(&tx_channel->base.fsm, RMT_FSM_INIT);
  293. tx_channel->base.direction = RMT_CHANNEL_DIRECTION_TX;
  294. tx_channel->base.hw_mem_base = &RMTMEM.channels[channel_id + RMT_TX_CHANNEL_OFFSET_IN_GROUP].symbols[0];
  295. // polymorphic methods
  296. tx_channel->base.del = rmt_del_tx_channel;
  297. tx_channel->base.set_carrier_action = rmt_tx_modulate_carrier;
  298. tx_channel->base.enable = rmt_tx_enable;
  299. tx_channel->base.disable = rmt_tx_disable;
  300. // return general channel handle
  301. *ret_chan = &tx_channel->base;
  302. ESP_LOGD(TAG, "new tx channel(%d,%d) at %p, gpio=%d, res=%"PRIu32"Hz, hw_mem_base=%p, dma_mem_base=%p, dma_nodes_nc=%p,ping_pong_size=%zu, queue_depth=%zu",
  303. group_id, channel_id, tx_channel, config->gpio_num, tx_channel->base.resolution_hz,
  304. tx_channel->base.hw_mem_base, tx_channel->base.dma_mem_base, tx_channel->dma_nodes_nc, tx_channel->ping_pong_symbols, tx_channel->queue_size);
  305. return ESP_OK;
  306. err:
  307. if (tx_channel) {
  308. rmt_tx_destroy(tx_channel);
  309. }
  310. return ret;
  311. }
  312. static esp_err_t rmt_del_tx_channel(rmt_channel_handle_t channel)
  313. {
  314. ESP_RETURN_ON_FALSE(atomic_load(&channel->fsm) == RMT_FSM_INIT,
  315. ESP_ERR_INVALID_STATE, TAG, "channel not in init state");
  316. rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
  317. rmt_group_t *group = channel->group;
  318. int group_id = group->group_id;
  319. int channel_id = channel->channel_id;
  320. ESP_LOGD(TAG, "del tx channel(%d,%d)", group_id, channel_id);
  321. // recycle memory resource
  322. ESP_RETURN_ON_ERROR(rmt_tx_destroy(tx_chan), TAG, "destroy tx channel failed");
  323. return ESP_OK;
  324. }
// Create a sync manager that makes a set of TX channels start simultaneously.
// All managed channels must be TX channels, belong to the same group, and be
// in the enable state; a group supports at most one sync manager.
esp_err_t rmt_new_sync_manager(const rmt_sync_manager_config_t *config, rmt_sync_manager_handle_t *ret_synchro)
{
#if !SOC_RMT_SUPPORT_TX_SYNCHRO
    ESP_RETURN_ON_FALSE(false, ESP_ERR_NOT_SUPPORTED, TAG, "sync manager not supported");
#else
    esp_err_t ret = ESP_OK;
    rmt_sync_manager_t *synchro = NULL;
    ESP_GOTO_ON_FALSE(config && ret_synchro && config->tx_channel_array && config->array_size, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    // allocate the manager together with its flexible array of channel handles
    synchro = heap_caps_calloc(1, sizeof(rmt_sync_manager_t) + sizeof(rmt_channel_handle_t) * config->array_size, RMT_MEM_ALLOC_CAPS);
    ESP_GOTO_ON_FALSE(synchro, ESP_ERR_NO_MEM, err, TAG, "no mem for sync manager");
    for (size_t i = 0; i < config->array_size; i++) {
        synchro->tx_channel_array[i] = config->tx_channel_array[i];
    }
    synchro->array_size = config->array_size;
    // NOTE(review): array entries are only validated in the loop below; this
    // assumes tx_channel_array[0] is a valid channel handle — confirm callers
    int group_id = config->tx_channel_array[0]->group->group_id;
    // acquire group handle, increase reference count
    rmt_group_t *group = rmt_acquire_group_handle(group_id);
    // sanity check
    assert(group);
    synchro->group = group;
    // calculate the mask of the channels to be managed
    uint32_t channel_mask = 0;
    rmt_channel_handle_t channel = NULL;
    for (size_t i = 0; i < config->array_size; i++) {
        channel = config->tx_channel_array[i];
        ESP_GOTO_ON_FALSE(channel->direction == RMT_CHANNEL_DIRECTION_TX, ESP_ERR_INVALID_ARG, err, TAG, "sync manager supports TX channel only");
        ESP_GOTO_ON_FALSE(channel->group == group, ESP_ERR_INVALID_ARG, err, TAG, "channels to be managed should locate in the same group");
        ESP_GOTO_ON_FALSE(atomic_load(&channel->fsm) == RMT_FSM_ENABLE, ESP_ERR_INVALID_STATE, err, TAG, "channel not in enable state");
        channel_mask |= 1 << channel->channel_id;
    }
    synchro->channel_mask = channel_mask;
    // search and register sync manager to group (only one slot per group)
    bool new_synchro = false;
    portENTER_CRITICAL(&group->spinlock);
    if (group->sync_manager == NULL) {
        group->sync_manager = synchro;
        new_synchro = true;
    }
    portEXIT_CRITICAL(&group->spinlock);
    ESP_GOTO_ON_FALSE(new_synchro, ESP_ERR_NOT_FOUND, err, TAG, "no free sync manager in the group");
    // enable sync manager
    portENTER_CRITICAL(&group->spinlock);
    rmt_ll_tx_enable_sync(group->hal.regs, true);
    rmt_ll_tx_sync_group_add_channels(group->hal.regs, channel_mask);
    rmt_ll_tx_reset_channels_clock_div(group->hal.regs, channel_mask);
    // ensure the reading cursor of each channel is pulled back to the starting line
    for (size_t i = 0; i < config->array_size; i++) {
        rmt_ll_tx_reset_pointer(group->hal.regs, config->tx_channel_array[i]->channel_id);
    }
    portEXIT_CRITICAL(&group->spinlock);
    *ret_synchro = synchro;
    ESP_LOGD(TAG, "new sync manager at %p, with channel mask:%02"PRIx32, synchro, synchro->channel_mask);
    return ESP_OK;
err:
    // roll back: drop the group reference (if taken) and free the manager
    if (synchro) {
        if (synchro->group) {
            rmt_release_group_handle(synchro->group);
        }
        free(synchro);
    }
    return ret;
#endif // !SOC_RMT_SUPPORT_TX_SYNCHRO
}
  388. esp_err_t rmt_sync_reset(rmt_sync_manager_handle_t synchro)
  389. {
  390. #if !SOC_RMT_SUPPORT_TX_SYNCHRO
  391. ESP_RETURN_ON_FALSE(false, ESP_ERR_NOT_SUPPORTED, TAG, "sync manager not supported");
  392. #else
  393. ESP_RETURN_ON_FALSE(synchro, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
  394. rmt_group_t *group = synchro->group;
  395. portENTER_CRITICAL(&group->spinlock);
  396. rmt_ll_tx_reset_channels_clock_div(group->hal.regs, synchro->channel_mask);
  397. for (size_t i = 0; i < synchro->array_size; i++) {
  398. rmt_ll_tx_reset_pointer(group->hal.regs, synchro->tx_channel_array[i]->channel_id);
  399. }
  400. portEXIT_CRITICAL(&group->spinlock);
  401. return ESP_OK;
  402. #endif // !SOC_RMT_SUPPORT_TX_SYNCHRO
  403. }
  404. esp_err_t rmt_del_sync_manager(rmt_sync_manager_handle_t synchro)
  405. {
  406. #if !SOC_RMT_SUPPORT_TX_SYNCHRO
  407. ESP_RETURN_ON_FALSE(false, ESP_ERR_NOT_SUPPORTED, TAG, "sync manager not supported");
  408. #else
  409. ESP_RETURN_ON_FALSE(synchro, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
  410. rmt_group_t *group = synchro->group;
  411. int group_id = group->group_id;
  412. portENTER_CRITICAL(&group->spinlock);
  413. group->sync_manager = NULL;
  414. // disable sync manager
  415. rmt_ll_tx_enable_sync(group->hal.regs, false);
  416. rmt_ll_tx_sync_group_remove_channels(group->hal.regs, synchro->channel_mask);
  417. portEXIT_CRITICAL(&group->spinlock);
  418. free(synchro);
  419. ESP_LOGD(TAG, "del sync manager in group(%d)", group_id);
  420. rmt_release_group_handle(group);
  421. return ESP_OK;
  422. #endif // !SOC_RMT_SUPPORT_TX_SYNCHRO
  423. }
  424. esp_err_t rmt_tx_register_event_callbacks(rmt_channel_handle_t channel, const rmt_tx_event_callbacks_t *cbs, void *user_data)
  425. {
  426. ESP_RETURN_ON_FALSE(channel && cbs, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
  427. ESP_RETURN_ON_FALSE(channel->direction == RMT_CHANNEL_DIRECTION_TX, ESP_ERR_INVALID_ARG, TAG, "invalid channel direction");
  428. rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
  429. #if CONFIG_RMT_ISR_IRAM_SAFE
  430. if (cbs->on_trans_done) {
  431. ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_trans_done), ESP_ERR_INVALID_ARG, TAG, "on_trans_done callback not in IRAM");
  432. }
  433. if (user_data) {
  434. ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG, TAG, "user context not in internal RAM");
  435. }
  436. #endif
  437. tx_chan->on_trans_done = cbs->on_trans_done;
  438. tx_chan->user_data = user_data;
  439. return ESP_OK;
  440. }
// Queue one transaction for transmission. Non-blocking unless the descriptor
// pool is exhausted and `queue_nonblocking` is false, in which case the call
// waits for a finished transaction to recycle.
esp_err_t rmt_transmit(rmt_channel_handle_t channel, rmt_encoder_t *encoder, const void *payload, size_t payload_bytes, const rmt_transmit_config_t *config)
{
    ESP_RETURN_ON_FALSE(channel && encoder && payload && payload_bytes && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE(channel->direction == RMT_CHANNEL_DIRECTION_TX, ESP_ERR_INVALID_ARG, TAG, "invalid channel direction");
#if !SOC_RMT_SUPPORT_TX_LOOP_COUNT
    ESP_RETURN_ON_FALSE(config->loop_count <= 0, ESP_ERR_NOT_SUPPORTED, TAG, "loop count is not supported");
#endif // !SOC_RMT_SUPPORT_TX_LOOP_COUNT
#if CONFIG_RMT_ISR_IRAM_SAFE
    // payload is retrieved by the encoder, we should make sure it's still accessible even when the cache is disabled
    ESP_RETURN_ON_FALSE(esp_ptr_internal(payload), ESP_ERR_INVALID_ARG, TAG, "payload not in internal RAM");
#endif
    TickType_t queue_wait_ticks = portMAX_DELAY;
    if (config->flags.queue_nonblocking) {
        queue_wait_ticks = 0;
    }
    rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
    rmt_tx_trans_desc_t *t = NULL;
    // acquire one transaction description from ready queue or complete queue
    // (a descriptor recycled from the complete queue was in flight, so the
    // in-flight counter is decremented when it's reclaimed)
    if (xQueueReceive(tx_chan->trans_queues[RMT_TX_QUEUE_READY], &t, 0) != pdTRUE) {
        if (xQueueReceive(tx_chan->trans_queues[RMT_TX_QUEUE_COMPLETE], &t, queue_wait_ticks) == pdTRUE) {
            tx_chan->num_trans_inflight--;
        }
    }
    // `t` is still NULL if both queues were empty within the allowed wait
    ESP_RETURN_ON_FALSE(t, ESP_ERR_INVALID_STATE, TAG, "no free transaction descriptor, please consider increasing trans_queue_depth");
    // fill in the transaction descriptor
    memset(t, 0, sizeof(rmt_tx_trans_desc_t));
    t->encoder = encoder;
    t->payload = payload;
    t->payload_bytes = payload_bytes;
    t->loop_count = config->loop_count;
    t->remain_loop_count = t->loop_count;
    t->flags.eot_level = config->flags.eot_level;
    // send the transaction descriptor to queue
    if (xQueueSend(tx_chan->trans_queues[RMT_TX_QUEUE_PROGRESS], &t, 0) == pdTRUE) {
        tx_chan->num_trans_inflight++;
    } else {
        // put the trans descriptor back to ready_queue
        ESP_RETURN_ON_FALSE(xQueueSend(tx_chan->trans_queues[RMT_TX_QUEUE_READY], &t, 0) == pdTRUE,
                            ESP_ERR_INVALID_STATE, TAG, "ready queue full");
    }
    // check if we need to start one pending transaction
    // the ENABLE -> RUN_WAIT CAS guards against concurrent starters (ISR or
    // another task): only the winner may kick off the next transaction or
    // restore the ENABLE state
    rmt_fsm_t expected_fsm = RMT_FSM_ENABLE;
    if (atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_RUN_WAIT)) {
        // check if we need to start one transaction
        if (xQueueReceive(tx_chan->trans_queues[RMT_TX_QUEUE_PROGRESS], &t, 0) == pdTRUE) {
            atomic_store(&channel->fsm, RMT_FSM_RUN);
            rmt_tx_do_transaction(tx_chan, t);
        } else {
            atomic_store(&channel->fsm, RMT_FSM_ENABLE);
        }
    }
    return ESP_OK;
}
  494. esp_err_t rmt_tx_wait_all_done(rmt_channel_handle_t channel, int timeout_ms)
  495. {
  496. ESP_RETURN_ON_FALSE(channel, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
  497. rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
  498. TickType_t wait_ticks = timeout_ms < 0 ? portMAX_DELAY : pdMS_TO_TICKS(timeout_ms);
  499. // recycle all transaction that are on the fly
  500. rmt_tx_trans_desc_t *t = NULL;
  501. size_t num_trans_inflight = tx_chan->num_trans_inflight;
  502. for (size_t i = 0; i < num_trans_inflight; i++) {
  503. ESP_RETURN_ON_FALSE(xQueueReceive(tx_chan->trans_queues[RMT_TX_QUEUE_COMPLETE], &t, wait_ticks) == pdTRUE,
  504. ESP_ERR_TIMEOUT, TAG, "flush timeout");
  505. ESP_RETURN_ON_FALSE(xQueueSend(tx_chan->trans_queues[RMT_TX_QUEUE_READY], &t, 0) == pdTRUE,
  506. ESP_ERR_INVALID_STATE, TAG, "ready queue full");
  507. tx_chan->num_trans_inflight--;
  508. }
  509. return ESP_OK;
  510. }
// Append the EOF marker (an RMT word with zero durations) after the last
// encoded symbol, then finalize the current DMA descriptor (DMA mode) or
// disable the threshold interrupt (non-DMA mode). Runs in ISR context (IRAM).
static void IRAM_ATTR rmt_tx_mark_eof(rmt_tx_channel_t *tx_chan)
{
    rmt_channel_t *channel = &tx_chan->base;
    rmt_group_t *group = channel->group;
    int channel_id = channel->channel_id;
    rmt_symbol_word_t *mem_to_nc = NULL;
    rmt_tx_trans_desc_t *cur_trans = tx_chan->cur_trans;
    rmt_dma_descriptor_t *desc_nc = NULL;
    // pick the write target: non-cached alias of the DMA staging buffer, or
    // the channel's hardware memory block
    if (channel->dma_chan) {
        mem_to_nc = (rmt_symbol_word_t *)RMT_GET_NON_CACHE_ADDR(channel->dma_mem_base);
    } else {
        mem_to_nc = channel->hw_mem_base;
    }
    // a RMT word whose duration is zero means a "stop" pattern
    mem_to_nc[tx_chan->mem_off++] = (rmt_symbol_word_t) {
        .duration0 = 0,
        .level0 = cur_trans->flags.eot_level,
        .duration1 = 0,
        .level1 = cur_trans->flags.eot_level,
    };
    size_t off = 0;
    if (channel->dma_chan) {
        // figure out which ping-pong descriptor the EOF landed in, and how
        // many symbols that descriptor now carries
        if (tx_chan->mem_off <= tx_chan->ping_pong_symbols) {
            desc_nc = &tx_chan->dma_nodes_nc[0];
            off = tx_chan->mem_off;
        } else {
            desc_nc = &tx_chan->dma_nodes_nc[1];
            off = tx_chan->mem_off - tx_chan->ping_pong_symbols;
        }
        // hand the final descriptor to the DMA with its true payload length
        desc_nc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
        desc_nc->dw0.length = off * sizeof(rmt_symbol_word_t);
        // break down the DMA descriptor link so the DMA stops after this node
        desc_nc->next = NULL;
    } else {
        portENTER_CRITICAL_ISR(&group->spinlock);
        // This is the end of a sequence of encoding sessions, disable the threshold interrupt as no more data will be put into RMT memory block
        rmt_ll_enable_interrupt(group->hal.regs, RMT_LL_EVENT_TX_THRES(channel_id), false);
        portEXIT_CRITICAL_ISR(&group->spinlock);
    }
}
  551. static size_t IRAM_ATTR rmt_encode_check_result(rmt_tx_channel_t *tx_chan, rmt_tx_trans_desc_t *t)
  552. {
  553. rmt_encode_state_t encode_state = RMT_ENCODING_RESET;
  554. rmt_encoder_handle_t encoder = t->encoder;
  555. size_t encoded_symbols = encoder->encode(encoder, &tx_chan->base, t->payload, t->payload_bytes, &encode_state);
  556. if (encode_state & RMT_ENCODING_COMPLETE) {
  557. t->flags.encoding_done = true;
  558. // inserting EOF symbol if there's extra space
  559. if (!(encode_state & RMT_ENCODING_MEM_FULL)) {
  560. rmt_tx_mark_eof(tx_chan);
  561. encoded_symbols += 1;
  562. }
  563. }
  564. // for loop transaction, the memory block should accommodate all encoded RMT symbols
  565. if (t->loop_count != 0) {
  566. if (unlikely(encoded_symbols > tx_chan->base.mem_block_num * SOC_RMT_MEM_WORDS_PER_CHANNEL)) {
  567. ESP_DRAM_LOGE(TAG, "encoding artifacts can't exceed hw memory block for loop transmission");
  568. }
  569. }
  570. return encoded_symbols;
  571. }
static void IRAM_ATTR rmt_tx_do_transaction(rmt_tx_channel_t *tx_chan, rmt_tx_trans_desc_t *t)
{
    // Kick off one transaction on the hardware: re-arm the DMA descriptor
    // ring (if any), program loop/interrupt settings, run the first encoding
    // session, then start the TX machine.
    // NOTE(review): called from both task and ISR context, hence the
    // *_CRITICAL_SAFE lock variants.
    rmt_channel_t *channel = &tx_chan->base;
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    int channel_id = channel->channel_id;
    // update current transaction
    tx_chan->cur_trans = t;
#if SOC_RMT_SUPPORT_DMA
    if (channel->dma_chan) {
        gdma_reset(channel->dma_chan);
        // chain the descriptors into a ring, and will break it in `rmt_encode_eof()`
        for (int i = 0; i < RMT_DMA_NODES_PING_PONG; i++) {
            tx_chan->dma_nodes_nc[i].next = &tx_chan->dma_nodes[i + 1]; // note, we must use the cache address for the next pointer
            tx_chan->dma_nodes_nc[i].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
        }
        tx_chan->dma_nodes_nc[1].next = &tx_chan->dma_nodes[0];
    }
#endif // SOC_RMT_SUPPORT_DMA
    // set transaction specific parameters
    portENTER_CRITICAL_SAFE(&channel->spinlock);
    rmt_ll_tx_reset_pointer(hal->regs, channel_id); // reset pointer for new transaction
    rmt_ll_tx_enable_loop(hal->regs, channel_id, t->loop_count != 0);
#if SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP
    rmt_ll_tx_enable_loop_autostop(hal->regs, channel_id, true);
#endif // SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP
#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
    rmt_ll_tx_reset_loop_count(hal->regs, channel_id);
    // loop_count < 0 means infinite loop; the hardware counter is only used
    // for a finite (positive) loop count
    rmt_ll_tx_enable_loop_count(hal->regs, channel_id, t->loop_count > 0);
    // transfer loops in batches
    if (t->remain_loop_count > 0) {
        uint32_t this_loop_count = MIN(t->remain_loop_count, RMT_LL_MAX_LOOP_COUNT_PER_BATCH);
        rmt_ll_tx_set_loop_count(hal->regs, channel_id, this_loop_count);
        t->remain_loop_count -= this_loop_count;
    }
#endif // SOC_RMT_SUPPORT_TX_LOOP_COUNT
    portEXIT_CRITICAL_SAFE(&channel->spinlock);
    // enable/disable specific interrupts
    portENTER_CRITICAL_SAFE(&group->spinlock);
#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
    rmt_ll_enable_interrupt(hal->regs, RMT_LL_EVENT_TX_LOOP_END(channel_id), t->loop_count > 0);
#endif // SOC_RMT_SUPPORT_TX_LOOP_COUNT
    // in DMA mode, DMA eof event plays the similar functionality to this threshold interrupt, so only enable it for non-DMA mode
    if (!channel->dma_chan) {
        // don't enable threshold interrupt with loop mode on
        // threshold interrupt will be disabled in `rmt_encode_eof()`
        rmt_ll_enable_interrupt(hal->regs, RMT_LL_EVENT_TX_THRES(channel_id), t->loop_count == 0);
        // Threshold interrupt will be generated by accident, clear it before starting new transmission
        rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_TX_THRES(channel_id));
    }
    // don't generate trans done event for loop transmission
    rmt_ll_enable_interrupt(hal->regs, RMT_LL_EVENT_TX_DONE(channel_id), t->loop_count == 0);
    portEXIT_CRITICAL_SAFE(&group->spinlock);
    // at the beginning of a new transaction, encoding memory offset should start from zero.
    // It will increase in the encode function e.g. `rmt_encode_copy()`
    tx_chan->mem_off = 0;
    // use the full memory block for the beginning encoding session
    tx_chan->mem_end = tx_chan->ping_pong_symbols * 2;
    // perform the encoding session, return the number of encoded symbols
    t->transmitted_symbol_num = rmt_encode_check_result(tx_chan, t);
    // we're going to perform ping-pong operation, so the next encoding end position is the middle
    tx_chan->mem_end = tx_chan->ping_pong_symbols;
#if SOC_RMT_SUPPORT_DMA
    if (channel->dma_chan) {
        gdma_start(channel->dma_chan, (intptr_t)tx_chan->dma_nodes); // note, we must use the cached descriptor address to start the DMA
        // delay a while, wait for DMA data going to RMT memory block
        esp_rom_delay_us(1);
    }
#endif
    // turn on the TX machine
    portENTER_CRITICAL_SAFE(&channel->spinlock);
    rmt_ll_tx_fix_idle_level(hal->regs, channel_id, t->flags.eot_level, true);
    rmt_ll_tx_start(hal->regs, channel_id);
    portEXIT_CRITICAL_SAFE(&channel->spinlock);
}
static esp_err_t rmt_tx_enable(rmt_channel_handle_t channel)
{
    // Transition the channel from INIT to ENABLE: acquire the PM lock,
    // connect the DMA path if used, then start a transaction that may
    // already be queued in the progress queue.
    // can enable the channel when it's in "init" state
    rmt_fsm_t expected_fsm = RMT_FSM_INIT;
    ESP_RETURN_ON_FALSE(atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_ENABLE_WAIT),
                        ESP_ERR_INVALID_STATE, TAG, "channel not in init state");
    rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
    // acquire power manager lock
    if (channel->pm_lock) {
        esp_pm_lock_acquire(channel->pm_lock);
    }
#if SOC_RMT_SUPPORT_DMA
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    int channel_id = channel->channel_id;
    if (channel->dma_chan) {
        // enable the DMA access mode
        portENTER_CRITICAL(&channel->spinlock);
        rmt_ll_tx_enable_dma(hal->regs, channel_id, true);
        portEXIT_CRITICAL(&channel->spinlock);
        gdma_connect(channel->dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_RMT, 0));
    }
#endif // SOC_RMT_SUPPORT_DMA
    atomic_store(&channel->fsm, RMT_FSM_ENABLE);
    // check if we need to start one pending transaction
    rmt_tx_trans_desc_t *t = NULL;
    expected_fsm = RMT_FSM_ENABLE;
    // the ENABLE -> RUN_WAIT compare-exchange ensures only one context
    // (task or ISR) picks up the pending transaction
    if (atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_RUN_WAIT)) {
        if (xQueueReceive(tx_chan->trans_queues[RMT_TX_QUEUE_PROGRESS], &t, 0) == pdTRUE) {
            // sanity check
            assert(t);
            atomic_store(&channel->fsm, RMT_FSM_RUN);
            rmt_tx_do_transaction(tx_chan, t);
        } else {
            // nothing pending, fall back to the idle ENABLE state
            atomic_store(&channel->fsm, RMT_FSM_ENABLE);
        }
    }
    return ESP_OK;
}
static esp_err_t rmt_tx_disable(rmt_channel_handle_t channel)
{
    // Stop the channel and return it to INIT. Accepts either the idle ENABLE
    // state or the RUN state; in the latter case the ongoing transmission is
    // forcefully stopped and its descriptor recycled to the complete queue.
    rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    int channel_id = channel->channel_id;
    // can disable the channel when it's in `enable` or `run` state
    bool valid_state = false;
    rmt_fsm_t expected_fsm = RMT_FSM_ENABLE;
    if (atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_INIT_WAIT)) {
        valid_state = true;
    }
    expected_fsm = RMT_FSM_RUN;
    if (atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_INIT_WAIT)) {
        valid_state = true;
        // disable the hardware
        portENTER_CRITICAL(&channel->spinlock);
        rmt_ll_tx_enable_loop(hal->regs, channel->channel_id, false);
#if SOC_RMT_SUPPORT_TX_ASYNC_STOP
        rmt_ll_tx_stop(hal->regs, channel->channel_id);
#endif
        portEXIT_CRITICAL(&channel->spinlock);
        portENTER_CRITICAL(&group->spinlock);
        rmt_ll_enable_interrupt(hal->regs, RMT_LL_EVENT_TX_MASK(channel_id), false);
#if !SOC_RMT_SUPPORT_TX_ASYNC_STOP
        // we do a trick to stop the undergoing transmission
        // stop interrupt, insert EOF marker to the RMT memory, polling the trans_done event
        channel->hw_mem_base[0].val = 0;
        while (!(rmt_ll_tx_get_interrupt_status_raw(hal->regs, channel_id) & RMT_LL_EVENT_TX_DONE(channel_id))) {}
#endif
        rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_TX_MASK(channel_id));
        portEXIT_CRITICAL(&group->spinlock);
    }
    ESP_RETURN_ON_FALSE(valid_state, ESP_ERR_INVALID_STATE, TAG, "channel can't be disabled in state %d", expected_fsm);
    // disable the DMA
#if SOC_RMT_SUPPORT_DMA
    if (channel->dma_chan) {
        gdma_stop(channel->dma_chan);
        gdma_disconnect(channel->dma_chan);
        // disable DMA access mode
        portENTER_CRITICAL(&channel->spinlock);
        rmt_ll_tx_enable_dma(hal->regs, channel_id, false);
        portEXIT_CRITICAL(&channel->spinlock);
    }
#endif
    // recycle the interrupted transaction
    if (tx_chan->cur_trans) {
        xQueueSend(tx_chan->trans_queues[RMT_TX_QUEUE_COMPLETE], &tx_chan->cur_trans, 0);
        // reset corresponding encoder
        rmt_encoder_reset(tx_chan->cur_trans->encoder);
    }
    tx_chan->cur_trans = NULL;
    // release power manager lock
    if (channel->pm_lock) {
        ESP_RETURN_ON_ERROR(esp_pm_lock_release(channel->pm_lock), TAG, "release pm_lock failed");
    }
    // finally we switch to the INIT state
    atomic_store(&channel->fsm, RMT_FSM_INIT);
    return ESP_OK;
}
  746. static esp_err_t rmt_tx_modulate_carrier(rmt_channel_handle_t channel, const rmt_carrier_config_t *config)
  747. {
  748. rmt_group_t *group = channel->group;
  749. rmt_hal_context_t *hal = &group->hal;
  750. int group_id = group->group_id;
  751. int channel_id = channel->channel_id;
  752. uint32_t real_frequency = 0;
  753. if (config && config->frequency_hz) {
  754. // carrier module works base on group clock
  755. uint32_t total_ticks = group->resolution_hz / config->frequency_hz; // Note this division operation will lose precision
  756. uint32_t high_ticks = total_ticks * config->duty_cycle;
  757. uint32_t low_ticks = total_ticks - high_ticks;
  758. portENTER_CRITICAL(&channel->spinlock);
  759. rmt_ll_tx_set_carrier_level(hal->regs, channel_id, !config->flags.polarity_active_low);
  760. rmt_ll_tx_set_carrier_high_low_ticks(hal->regs, channel_id, high_ticks, low_ticks);
  761. #if SOC_RMT_SUPPORT_TX_CARRIER_DATA_ONLY
  762. rmt_ll_tx_enable_carrier_always_on(hal->regs, channel_id, config->flags.always_on);
  763. #endif
  764. portEXIT_CRITICAL(&channel->spinlock);
  765. // save real carrier frequency
  766. real_frequency = group->resolution_hz / total_ticks;
  767. }
  768. // enable/disable carrier modulation
  769. portENTER_CRITICAL(&channel->spinlock);
  770. rmt_ll_tx_enable_carrier_modulation(hal->regs, channel_id, real_frequency > 0);
  771. portEXIT_CRITICAL(&channel->spinlock);
  772. if (real_frequency > 0) {
  773. ESP_LOGD(TAG, "enable carrier modulation for channel(%d,%d), freq=%"PRIu32"Hz", group_id, channel_id, real_frequency);
  774. } else {
  775. ESP_LOGD(TAG, "disable carrier modulation for channel(%d,%d)", group_id, channel_id);
  776. }
  777. return ESP_OK;
  778. }
  779. static bool IRAM_ATTR rmt_isr_handle_tx_threshold(rmt_tx_channel_t *tx_chan)
  780. {
  781. // continue ping-pong transmission
  782. rmt_tx_trans_desc_t *t = tx_chan->cur_trans;
  783. size_t encoded_symbols = t->transmitted_symbol_num;
  784. // encoding finished, only need to send the EOF symbol
  785. if (t->flags.encoding_done) {
  786. rmt_tx_mark_eof(tx_chan);
  787. encoded_symbols += 1;
  788. } else {
  789. encoded_symbols += rmt_encode_check_result(tx_chan, t);
  790. }
  791. t->transmitted_symbol_num = encoded_symbols;
  792. tx_chan->mem_end = tx_chan->ping_pong_symbols * 3 - tx_chan->mem_end; // mem_end equals to either ping_pong_symbols or ping_pong_symbols*2
  793. return false;
  794. }
static bool IRAM_ATTR rmt_isr_handle_tx_done(rmt_tx_channel_t *tx_chan)
{
    // Handle the "transmission done" event: move the finished transaction to
    // the complete queue, invoke the user's done callback, then chain-start
    // the next transaction waiting in the progress queue (if any).
    // Returns true if a context switch should be requested on ISR exit.
    rmt_channel_t *channel = &tx_chan->base;
    BaseType_t awoken = pdFALSE;
    rmt_tx_trans_desc_t *trans_desc = NULL;
    bool need_yield = false;
    // RUN -> ENABLE_WAIT guards the recycle path against a concurrent disable
    rmt_fsm_t expected_fsm = RMT_FSM_RUN;
    if (atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_ENABLE_WAIT)) {
        trans_desc = tx_chan->cur_trans;
        // move current finished transaction to the complete queue
        xQueueSendFromISR(tx_chan->trans_queues[RMT_TX_QUEUE_COMPLETE], &trans_desc, &awoken);
        if (awoken == pdTRUE) {
            need_yield = true;
        }
        tx_chan->cur_trans = NULL;
        atomic_store(&channel->fsm, RMT_FSM_ENABLE);
        // invoke callback
        rmt_tx_done_callback_t done_cb = tx_chan->on_trans_done;
        if (done_cb) {
            rmt_tx_done_event_data_t edata = {
                .num_symbols = trans_desc->transmitted_symbol_num,
            };
            // callback returns true if it woke a higher-priority task
            if (done_cb(channel, &edata, tx_chan->user_data)) {
                need_yield = true;
            }
        }
    }
    // let's try start the next pending transaction
    expected_fsm = RMT_FSM_ENABLE;
    if (atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_RUN_WAIT)) {
        if (xQueueReceiveFromISR(tx_chan->trans_queues[RMT_TX_QUEUE_PROGRESS], &trans_desc, &awoken) == pdTRUE) {
            // sanity check
            assert(trans_desc);
            atomic_store(&channel->fsm, RMT_FSM_RUN);
            // begin a new transaction
            rmt_tx_do_transaction(tx_chan, trans_desc);
            if (awoken == pdTRUE) {
                need_yield = true;
            }
        } else {
            // no pending transaction, stay idle in the ENABLE state
            atomic_store(&channel->fsm, RMT_FSM_ENABLE);
        }
    }
    return need_yield;
}
#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
static bool IRAM_ATTR rmt_isr_handle_tx_loop_end(rmt_tx_channel_t *tx_chan)
{
    // Handle the "loop batch finished" event. If the transaction still has
    // loops remaining, program the next batch and restart the engine without
    // re-encoding; otherwise recycle the transaction, fire the done callback,
    // and chain-start the next pending transaction.
    // Returns true if a context switch should be requested on ISR exit.
    rmt_channel_t *channel = &tx_chan->base;
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    uint32_t channel_id = channel->channel_id;
    BaseType_t awoken = pdFALSE;
    rmt_tx_trans_desc_t *trans_desc = NULL;
    bool need_yield = false;
    trans_desc = tx_chan->cur_trans;
    if (trans_desc) {
#if !SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP
        portENTER_CRITICAL_ISR(&channel->spinlock);
        // This is a workaround for chips that don't support loop auto stop
        // Although we stop the transaction immediately in ISR handler, it's still possible that some rmt symbols have sneaked out
        rmt_ll_tx_stop(hal->regs, channel_id);
        portEXIT_CRITICAL_ISR(&channel->spinlock);
#endif // SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP
        // continue unfinished loop transaction
        if (trans_desc->remain_loop_count) {
            uint32_t this_loop_count = MIN(trans_desc->remain_loop_count, RMT_LL_MAX_LOOP_COUNT_PER_BATCH);
            trans_desc->remain_loop_count -= this_loop_count;
            portENTER_CRITICAL_ISR(&channel->spinlock);
            rmt_ll_tx_set_loop_count(hal->regs, channel_id, this_loop_count);
            rmt_ll_tx_reset_pointer(hal->regs, channel_id);
            // continue the loop transmission, don't need to fill the RMT symbols again, just restart the engine
            rmt_ll_tx_start(hal->regs, channel_id);
            portEXIT_CRITICAL_ISR(&channel->spinlock);
            return need_yield;
        }
        // loop transaction finished
        // RUN -> ENABLE_WAIT guards the recycle path against a concurrent disable
        rmt_fsm_t expected_fsm = RMT_FSM_RUN;
        if (atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_ENABLE_WAIT)) {
            // move current finished transaction to the complete queue
            xQueueSendFromISR(tx_chan->trans_queues[RMT_TX_QUEUE_COMPLETE], &trans_desc, &awoken);
            if (awoken == pdTRUE) {
                need_yield = true;
            }
            tx_chan->cur_trans = NULL;
            atomic_store(&channel->fsm, RMT_FSM_ENABLE);
            // invoke callback
            rmt_tx_done_callback_t done_cb = tx_chan->on_trans_done;
            if (done_cb) {
                rmt_tx_done_event_data_t edata = {
                    .num_symbols = trans_desc->transmitted_symbol_num,
                };
                // callback returns true if it woke a higher-priority task
                if (done_cb(channel, &edata, tx_chan->user_data)) {
                    need_yield = true;
                }
            }
        }
        // let's try start the next pending transaction
        expected_fsm = RMT_FSM_ENABLE;
        if (atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_RUN_WAIT)) {
            if (xQueueReceiveFromISR(tx_chan->trans_queues[RMT_TX_QUEUE_PROGRESS], &trans_desc, &awoken) == pdTRUE) {
                // sanity check
                assert(trans_desc);
                atomic_store(&channel->fsm, RMT_FSM_RUN);
                // begin a new transaction
                rmt_tx_do_transaction(tx_chan, trans_desc);
                if (awoken == pdTRUE) {
                    need_yield = true;
                }
            } else {
                // no pending transaction, stay idle in the ENABLE state
                atomic_store(&channel->fsm, RMT_FSM_ENABLE);
            }
        }
    }
    if (awoken == pdTRUE) {
        need_yield = true;
    }
    return need_yield;
}
#endif // SOC_RMT_SUPPORT_TX_LOOP_COUNT
  915. static void IRAM_ATTR rmt_tx_default_isr(void *args)
  916. {
  917. rmt_tx_channel_t *tx_chan = (rmt_tx_channel_t *)args;
  918. rmt_channel_t *channel = &tx_chan->base;
  919. rmt_group_t *group = channel->group;
  920. rmt_hal_context_t *hal = &group->hal;
  921. uint32_t channel_id = channel->channel_id;
  922. bool need_yield = false;
  923. uint32_t status = rmt_ll_tx_get_interrupt_status(hal->regs, channel_id);
  924. rmt_ll_clear_interrupt_status(hal->regs, status);
  925. // Tx threshold interrupt
  926. if (status & RMT_LL_EVENT_TX_THRES(channel_id)) {
  927. if (rmt_isr_handle_tx_threshold(tx_chan)) {
  928. need_yield = true;
  929. }
  930. }
  931. // Tx end interrupt
  932. if (status & RMT_LL_EVENT_TX_DONE(channel_id)) {
  933. if (rmt_isr_handle_tx_done(tx_chan)) {
  934. need_yield = true;
  935. }
  936. }
  937. #if SOC_RMT_SUPPORT_TX_LOOP_COUNT
  938. // Tx loop end interrupt
  939. if (status & RMT_LL_EVENT_TX_LOOP_END(channel_id)) {
  940. if (rmt_isr_handle_tx_loop_end(tx_chan)) {
  941. need_yield = true;
  942. }
  943. }
  944. #endif // SOC_RMT_SUPPORT_TX_LOOP_COUNT
  945. if (need_yield) {
  946. portYIELD_FROM_ISR();
  947. }
  948. }
#if SOC_RMT_SUPPORT_DMA
static bool IRAM_ATTR rmt_dma_tx_eof_cb(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
{
    // GDMA per-descriptor EOF callback: acts as the DMA-mode counterpart of
    // the threshold interrupt. If the descriptor ring is still intact, refill
    // the just-drained buffer and resume the DMA; if the ring was already
    // broken by `rmt_tx_mark_eof()`, the transmission is ending and nothing
    // more is queued.
    rmt_tx_channel_t *tx_chan = (rmt_tx_channel_t *)user_data;
    rmt_dma_descriptor_t *eof_desc_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(event_data->tx_eof_desc_addr);
    rmt_dma_descriptor_t *n = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(eof_desc_nc->next); // next points to a cache address, needs to convert it to a non-cached one
    if (n) {
        rmt_dma_descriptor_t *nn = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(n->next);
        // if the DMA descriptor link is still a ring (i.e. hasn't broken down by `rmt_tx_mark_eof()`), then we treat it as a valid ping-pong event
        if (nn) {
            // continue ping-pong transmission
            rmt_tx_trans_desc_t *t = tx_chan->cur_trans;
            size_t encoded_symbols = t->transmitted_symbol_num;
            if (t->flags.encoding_done) {
                // payload fully encoded, only the EOF marker remains
                rmt_tx_mark_eof(tx_chan);
                encoded_symbols += 1;
            } else {
                encoded_symbols += rmt_encode_check_result(tx_chan, t);
            }
            t->transmitted_symbol_num = encoded_symbols;
            tx_chan->mem_end = tx_chan->ping_pong_symbols * 3 - tx_chan->mem_end; // mem_end equals to either ping_pong_symbols or ping_pong_symbols*2
            // tell DMA that we have a new descriptor attached
            gdma_append(dma_chan);
        }
    }
    return false;
}
#endif // SOC_RMT_SUPPORT_DMA