
/*
 * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/param.h>
#include "sdkconfig.h"
#if CONFIG_RMT_ENABLE_DEBUG_LOG
// The local log level must be defined before including esp_log.h
// Set the maximum log level for this source file
#define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
#endif
#include "esp_log.h"
#include "esp_check.h"
#include "esp_memory_utils.h"
#include "esp_rom_gpio.h"
#include "soc/rmt_periph.h"
#include "soc/rtc.h"
#include "hal/rmt_ll.h"
#include "hal/cache_hal.h"
#include "hal/gpio_hal.h"
#include "driver/gpio.h"
#include "driver/rmt_rx.h"
#include "rmt_private.h"
#include "rom/cache.h"

#define ALIGN_UP(num, align) (((num) + ((align) - 1)) & ~((align) - 1))

static const char *TAG = "rmt";

static esp_err_t rmt_del_rx_channel(rmt_channel_handle_t channel);
static esp_err_t rmt_rx_demodulate_carrier(rmt_channel_handle_t channel, const rmt_carrier_config_t *config);
static esp_err_t rmt_rx_enable(rmt_channel_handle_t channel);
static esp_err_t rmt_rx_disable(rmt_channel_handle_t channel);
static void rmt_rx_default_isr(void *args);

#if SOC_RMT_SUPPORT_DMA
static bool rmt_dma_rx_eof_cb(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data);
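
// Mount the user buffer onto the DMA descriptor chain: every descriptor except the last one
// covers RMT_DMA_DESC_BUF_MAX_SIZE bytes, and the chain is terminated with a NULL `next`
// pointer so the DMA stops after a single pass (one-off chain). The descriptors are written
// through the non-cached alias (`desc_array_nc`), while the `next` links point at the cached
// addresses, because that is the address the DMA engine is started with.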
static void rmt_rx_mount_dma_buffer(rmt_dma_descriptor_t *desc_array, rmt_dma_descriptor_t *desc_array_nc, size_t array_size, const void *buffer, size_t buffer_size)
{
    size_t prepared_length = 0;
    uint8_t *data = (uint8_t *)buffer;
    int dma_node_i = 0;
    rmt_dma_descriptor_t *desc = NULL;
    while (buffer_size > RMT_DMA_DESC_BUF_MAX_SIZE) {
        desc = &desc_array_nc[dma_node_i];
        desc->dw0.suc_eof = 0;
        desc->dw0.size = RMT_DMA_DESC_BUF_MAX_SIZE;
        desc->dw0.length = 0;
        desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
        desc->buffer = &data[prepared_length];
        desc->next = &desc_array[dma_node_i + 1]; // note, we must use the cache address for the "next" pointer
        prepared_length += RMT_DMA_DESC_BUF_MAX_SIZE;
        buffer_size -= RMT_DMA_DESC_BUF_MAX_SIZE;
        dma_node_i++;
    }
    if (buffer_size) {
        desc = &desc_array_nc[dma_node_i];
        desc->dw0.suc_eof = 0;
        desc->dw0.size = buffer_size;
        desc->dw0.length = 0;
        desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
        desc->buffer = &data[prepared_length];
        prepared_length += buffer_size;
    }
    desc->next = NULL; // one-off DMA chain
}
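
// Allocate an AHB GDMA RX channel for this RMT channel and register the EOF callback,
// which signals "receive done" in DMA mode instead of the RMT channel interrupt.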
static esp_err_t rmt_rx_init_dma_link(rmt_rx_channel_t *rx_channel, const rmt_rx_channel_config_t *config)
{
    gdma_channel_alloc_config_t dma_chan_config = {
        .direction = GDMA_CHANNEL_DIRECTION_RX,
    };
    ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_chan_config, &rx_channel->base.dma_chan), TAG, "allocate RX DMA channel failed");
    gdma_rx_event_callbacks_t cbs = {
        .on_recv_eof = rmt_dma_rx_eof_cb,
    };
    gdma_register_rx_event_callbacks(rx_channel->base.dma_chan, &cbs, rx_channel);
    return ESP_OK;
}
#endif // SOC_RMT_SUPPORT_DMA
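
// Reserve a hardware channel (and its memory blocks) in one of the RMT groups.
// The occupy mask is checked and updated under the group spinlock so that concurrent
// channel installations don't race for the same memory blocks.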
static esp_err_t rmt_rx_register_to_group(rmt_rx_channel_t *rx_channel, const rmt_rx_channel_config_t *config)
{
    size_t mem_block_num = 0;
    // start to search for a free channel
    // a channel can take up its neighbour's memory block, so the neighbour channel won't work; we should skip these "invaded" ones
    int channel_scan_start = RMT_RX_CHANNEL_OFFSET_IN_GROUP;
    int channel_scan_end = RMT_RX_CHANNEL_OFFSET_IN_GROUP + SOC_RMT_RX_CANDIDATES_PER_GROUP;
    if (config->flags.with_dma) {
        // for DMA mode, the memory block number is always 1; for non-DMA mode, the memory block number is configured by the user
        mem_block_num = 1;
        // only the last channel has the DMA capability
        channel_scan_start = RMT_RX_CHANNEL_OFFSET_IN_GROUP + SOC_RMT_RX_CANDIDATES_PER_GROUP - 1;
        rx_channel->ping_pong_symbols = 0; // with DMA, we don't need to do ping-pong
    } else {
        // one channel can occupy multiple memory blocks
        mem_block_num = config->mem_block_symbols / SOC_RMT_MEM_WORDS_PER_CHANNEL;
        if (mem_block_num * SOC_RMT_MEM_WORDS_PER_CHANNEL < config->mem_block_symbols) {
            mem_block_num++;
        }
        rx_channel->ping_pong_symbols = mem_block_num * SOC_RMT_MEM_WORDS_PER_CHANNEL / 2;
    }
    rx_channel->base.mem_block_num = mem_block_num;

    // search for a free channel and then register it to the group
    // memory blocks used by one channel must be contiguous
    uint32_t channel_mask = (1 << mem_block_num) - 1;
    rmt_group_t *group = NULL;
    int channel_id = -1;
    for (int i = 0; i < SOC_RMT_GROUPS; i++) {
        group = rmt_acquire_group_handle(i);
        ESP_RETURN_ON_FALSE(group, ESP_ERR_NO_MEM, TAG, "no mem for group (%d)", i);
        portENTER_CRITICAL(&group->spinlock);
        for (int j = channel_scan_start; j < channel_scan_end; j++) {
            if (!(group->occupy_mask & (channel_mask << j))) {
                group->occupy_mask |= (channel_mask << j);
                // the channel ID should index from 0
                channel_id = j - RMT_RX_CHANNEL_OFFSET_IN_GROUP;
                group->rx_channels[channel_id] = rx_channel;
                break;
            }
        }
        portEXIT_CRITICAL(&group->spinlock);
        if (channel_id < 0) {
            // didn't find a capable channel in the group, don't forget to release the group handle
            rmt_release_group_handle(group);
        } else {
            rx_channel->base.channel_id = channel_id;
            rx_channel->base.channel_mask = channel_mask;
            rx_channel->base.group = group;
            break;
        }
    }
    ESP_RETURN_ON_FALSE(channel_id >= 0, ESP_ERR_NOT_FOUND, TAG, "no free rx channels");
    return ESP_OK;
}
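
// Undo rmt_rx_register_to_group(): clear the channel slot and its memory-block occupation
// bits, then drop the reference this channel held on the group.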
static void rmt_rx_unregister_from_group(rmt_channel_t *channel, rmt_group_t *group)
{
    portENTER_CRITICAL(&group->spinlock);
    group->rx_channels[channel->channel_id] = NULL;
    group->occupy_mask &= ~(channel->channel_mask << (channel->channel_id + RMT_RX_CHANNEL_OFFSET_IN_GROUP));
    portEXIT_CRITICAL(&group->spinlock);
    // channel has a reference on group, release it now
    rmt_release_group_handle(group);
}
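
// Free everything a (possibly partially constructed) RX channel may own: interrupt handle,
// power management lock, DMA channel, group registration, DMA descriptors and the channel
// object itself. Safe to call from the error path of rmt_new_rx_channel() because every
// resource is checked before it is released.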
static esp_err_t rmt_rx_destroy(rmt_rx_channel_t *rx_channel)
{
    if (rx_channel->base.intr) {
        ESP_RETURN_ON_ERROR(esp_intr_free(rx_channel->base.intr), TAG, "delete interrupt service failed");
    }
    if (rx_channel->base.pm_lock) {
        ESP_RETURN_ON_ERROR(esp_pm_lock_delete(rx_channel->base.pm_lock), TAG, "delete pm_lock failed");
    }
#if SOC_RMT_SUPPORT_DMA
    if (rx_channel->base.dma_chan) {
        ESP_RETURN_ON_ERROR(gdma_del_channel(rx_channel->base.dma_chan), TAG, "delete dma channel failed");
    }
#endif // SOC_RMT_SUPPORT_DMA
    if (rx_channel->base.group) {
        // de-register channel from RMT group
        rmt_rx_unregister_from_group(&rx_channel->base, rx_channel->base.group);
    }
    if (rx_channel->dma_nodes) {
        free(rx_channel->dma_nodes);
    }
    free(rx_channel);
    return ESP_OK;
}
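
// Create an RX channel: validate the configuration, allocate the channel object (plus DMA
// descriptors when `with_dma` is set), register it to a free slot in an RMT group, install
// either the GDMA EOF callback or the RMT interrupt, configure the clock divider, memory
// blocks and GPIO, and finally hand back the generic channel handle.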
esp_err_t rmt_new_rx_channel(const rmt_rx_channel_config_t *config, rmt_channel_handle_t *ret_chan)
{
#if CONFIG_RMT_ENABLE_DEBUG_LOG
    esp_log_level_set(TAG, ESP_LOG_DEBUG);
#endif
    esp_err_t ret = ESP_OK;
    rmt_rx_channel_t *rx_channel = NULL;
    // validate the arguments before dereferencing `config`
    ESP_GOTO_ON_FALSE(config && ret_chan && config->resolution_hz, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    // check if the interrupt priority is valid
    if (config->intr_priority) {
        ESP_GOTO_ON_FALSE((config->intr_priority) > 0, ESP_ERR_INVALID_ARG, err, TAG, "invalid interrupt priority:%d", config->intr_priority);
        ESP_GOTO_ON_FALSE((1 << (config->intr_priority)) & RMT_ALLOW_INTR_PRIORITY_MASK, ESP_ERR_INVALID_ARG, err, TAG, "invalid interrupt priority:%d", config->intr_priority);
    }
    ESP_GOTO_ON_FALSE(GPIO_IS_VALID_GPIO(config->gpio_num), ESP_ERR_INVALID_ARG, err, TAG, "invalid GPIO number");
    ESP_GOTO_ON_FALSE((config->mem_block_symbols & 0x01) == 0 && config->mem_block_symbols >= SOC_RMT_MEM_WORDS_PER_CHANNEL,
                      ESP_ERR_INVALID_ARG, err, TAG, "mem_block_symbols must be even and at least %d", SOC_RMT_MEM_WORDS_PER_CHANNEL);
#if !SOC_RMT_SUPPORT_DMA
    ESP_GOTO_ON_FALSE(config->flags.with_dma == 0, ESP_ERR_NOT_SUPPORTED, err, TAG, "DMA not supported");
#endif // SOC_RMT_SUPPORT_DMA
    // malloc channel memory
    uint32_t mem_caps = RMT_MEM_ALLOC_CAPS;
    rx_channel = heap_caps_calloc(1, sizeof(rmt_rx_channel_t), mem_caps);
    ESP_GOTO_ON_FALSE(rx_channel, ESP_ERR_NO_MEM, err, TAG, "no mem for rx channel");
    // create DMA descriptors
    size_t num_dma_nodes = 0;
    if (config->flags.with_dma) {
        mem_caps |= MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA;
        num_dma_nodes = config->mem_block_symbols * sizeof(rmt_symbol_word_t) / RMT_DMA_DESC_BUF_MAX_SIZE + 1;
        // DMA descriptors must be placed in internal SRAM
        rx_channel->dma_nodes = heap_caps_aligned_calloc(RMT_DMA_DESC_ALIGN, num_dma_nodes, sizeof(rmt_dma_descriptor_t), mem_caps);
        ESP_GOTO_ON_FALSE(rx_channel->dma_nodes, ESP_ERR_NO_MEM, err, TAG, "no mem for rx channel DMA nodes");
        // we will use the non-cached address to manipulate the DMA descriptors, for simplicity
        rx_channel->dma_nodes_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(rx_channel->dma_nodes);
    }
    rx_channel->num_dma_nodes = num_dma_nodes;
    // register the channel to the group
    ESP_GOTO_ON_ERROR(rmt_rx_register_to_group(rx_channel, config), err, TAG, "register channel failed");
    rmt_group_t *group = rx_channel->base.group;
    rmt_hal_context_t *hal = &group->hal;
    int channel_id = rx_channel->base.channel_id;
    int group_id = group->group_id;

    // reset channel, make sure the RX engine is not working, and events are cleared
    portENTER_CRITICAL(&group->spinlock);
    rmt_hal_rx_channel_reset(&group->hal, channel_id);
    portEXIT_CRITICAL(&group->spinlock);
    // when the channel receives an end-marker, a DMA in_suc_eof interrupt will be generated,
    // so we don't rely on the RMT interrupt any more; the GDMA event callback is sufficient
    if (config->flags.with_dma) {
#if SOC_RMT_SUPPORT_DMA
        ESP_GOTO_ON_ERROR(rmt_rx_init_dma_link(rx_channel, config), err, TAG, "install rx DMA failed");
#endif // SOC_RMT_SUPPORT_DMA
    } else {
        // the RMT interrupt is mandatory if the channel doesn't use DMA
        // --- install interrupt service
        // the interrupt is mandatory to run basic RMT transactions, so it's not lazily installed in `rmt_rx_register_event_callbacks()`
        // 1-- set the user specified priority to `group->intr_priority`
        bool priority_conflict = rmt_set_intr_priority_to_group(group, config->intr_priority);
        ESP_GOTO_ON_FALSE(!priority_conflict, ESP_ERR_INVALID_ARG, err, TAG, "intr_priority conflict");
        // 2-- get the interrupt allocation flags
        int isr_flags = rmt_get_isr_flags(group);
        // 3-- allocate the interrupt using the ISR flags
        ret = esp_intr_alloc_intrstatus(rmt_periph_signals.groups[group_id].irq, isr_flags,
                                        (uint32_t)rmt_ll_get_interrupt_status_reg(hal->regs),
                                        RMT_LL_EVENT_RX_MASK(channel_id), rmt_rx_default_isr, rx_channel, &rx_channel->base.intr);
        ESP_GOTO_ON_ERROR(ret, err, TAG, "install rx interrupt failed");
    }
    // select the clock source
    ESP_GOTO_ON_ERROR(rmt_select_periph_clock(&rx_channel->base, config->clk_src), err, TAG, "set group clock failed");
    // set the channel clock resolution
    uint32_t real_div = group->resolution_hz / config->resolution_hz;
    rmt_ll_rx_set_channel_clock_div(hal->regs, channel_id, real_div);
    // resolution is lost due to the integer division, calculate the real resolution
    rx_channel->base.resolution_hz = group->resolution_hz / real_div;
    if (rx_channel->base.resolution_hz != config->resolution_hz) {
        ESP_LOGW(TAG, "channel resolution loss, real=%"PRIu32, rx_channel->base.resolution_hz);
    }
    rmt_ll_rx_set_mem_blocks(hal->regs, channel_id, rx_channel->base.mem_block_num);
    rmt_ll_rx_set_mem_owner(hal->regs, channel_id, RMT_LL_MEM_OWNER_HW);
#if SOC_RMT_SUPPORT_RX_PINGPONG
    rmt_ll_rx_set_limit(hal->regs, channel_id, rx_channel->ping_pong_symbols);
    // always enable rx wrap, both DMA mode and ping-pong mode rely on this feature
    rmt_ll_rx_enable_wrap(hal->regs, channel_id, true);
#endif
#if SOC_RMT_SUPPORT_RX_DEMODULATION
    // disable carrier demodulation by default; it can be re-enabled by `rmt_apply_carrier()`
    rmt_ll_rx_enable_carrier_demodulation(hal->regs, channel_id, false);
#endif
    // GPIO Matrix/MUX configuration
    rx_channel->base.gpio_num = config->gpio_num;
    gpio_config_t gpio_conf = {
        .intr_type = GPIO_INTR_DISABLE,
        // also enable the output path if `io_loop_back` is on, which is useful for debugging
        .mode = GPIO_MODE_INPUT | (config->flags.io_loop_back ? GPIO_MODE_OUTPUT : 0),
        .pull_down_en = false,
        .pull_up_en = true,
        .pin_bit_mask = 1ULL << config->gpio_num,
    };
    ESP_GOTO_ON_ERROR(gpio_config(&gpio_conf), err, TAG, "config GPIO failed");
    esp_rom_gpio_connect_in_signal(config->gpio_num,
                                   rmt_periph_signals.groups[group_id].channels[channel_id + RMT_RX_CHANNEL_OFFSET_IN_GROUP].rx_sig,
                                   config->flags.invert_in);
    gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_num], PIN_FUNC_GPIO);
    // initialize other members of the rx channel
    portMUX_INITIALIZE(&rx_channel->base.spinlock);
    atomic_init(&rx_channel->base.fsm, RMT_FSM_INIT);
    rx_channel->base.direction = RMT_CHANNEL_DIRECTION_RX;
    rx_channel->base.hw_mem_base = &RMTMEM.channels[channel_id + RMT_RX_CHANNEL_OFFSET_IN_GROUP].symbols[0];
    // polymorphic methods
    rx_channel->base.del = rmt_del_rx_channel;
    rx_channel->base.set_carrier_action = rmt_rx_demodulate_carrier;
    rx_channel->base.enable = rmt_rx_enable;
    rx_channel->base.disable = rmt_rx_disable;
    // return the general channel handle
    *ret_chan = &rx_channel->base;
    ESP_LOGD(TAG, "new rx channel(%d,%d) at %p, gpio=%d, res=%"PRIu32"Hz, hw_mem_base=%p, ping_pong_size=%d",
             group_id, channel_id, rx_channel, config->gpio_num, rx_channel->base.resolution_hz,
             rx_channel->base.hw_mem_base, rx_channel->ping_pong_symbols);
    return ESP_OK;

err:
    if (rx_channel) {
        rmt_rx_destroy(rx_channel);
    }
    return ret;
}

static esp_err_t rmt_del_rx_channel(rmt_channel_handle_t channel)
{
    ESP_RETURN_ON_FALSE(atomic_load(&channel->fsm) == RMT_FSM_INIT,
                        ESP_ERR_INVALID_STATE, TAG, "channel not in init state");
    rmt_rx_channel_t *rx_chan = __containerof(channel, rmt_rx_channel_t, base);
    rmt_group_t *group = channel->group;
    int group_id = group->group_id;
    int channel_id = channel->channel_id;
    ESP_LOGD(TAG, "del rx channel(%d,%d)", group_id, channel_id);
    // recycle memory resources
    ESP_RETURN_ON_ERROR(rmt_rx_destroy(rx_chan), TAG, "destroy rx channel failed");
    return ESP_OK;
}
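
// Store the user's receive-done callback and context. When CONFIG_RMT_ISR_IRAM_SAFE is
// enabled, the callback must live in IRAM and the context in internal RAM so they remain
// reachable even when the flash cache is disabled.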
esp_err_t rmt_rx_register_event_callbacks(rmt_channel_handle_t channel, const rmt_rx_event_callbacks_t *cbs, void *user_data)
{
    ESP_RETURN_ON_FALSE(channel && cbs, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE(channel->direction == RMT_CHANNEL_DIRECTION_RX, ESP_ERR_INVALID_ARG, TAG, "invalid channel direction");
    rmt_rx_channel_t *rx_chan = __containerof(channel, rmt_rx_channel_t, base);
#if CONFIG_RMT_ISR_IRAM_SAFE
    if (cbs->on_recv_done) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_done), ESP_ERR_INVALID_ARG, TAG, "on_recv_done callback not in IRAM");
    }
    if (user_data) {
        ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG, TAG, "user context not in internal RAM");
    }
#endif
    rx_chan->on_recv_done = cbs->on_recv_done;
    rx_chan->user_data = user_data;
    return ESP_OK;
}
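
// Start a new receive job on an enabled channel. The caller provides the destination buffer;
// in DMA mode it is mounted onto the descriptor chain, otherwise the ISR copies symbols out
// of the channel's hardware memory (using ping-pong where the chip supports it).
//
// A minimal usage sketch from the application side (not part of this driver). The GPIO
// number, resolution and range values below are placeholders chosen for illustration only:
//
//     rmt_rx_channel_config_t rx_cfg = {
//         .clk_src = RMT_CLK_SRC_DEFAULT,
//         .gpio_num = 2,                       // hypothetical input pin
//         .resolution_hz = 1 * 1000 * 1000,    // 1 MHz tick resolution, i.e. 1 us per tick
//         .mem_block_symbols = 64,
//     };
//     rmt_channel_handle_t rx_chan = NULL;
//     ESP_ERROR_CHECK(rmt_new_rx_channel(&rx_cfg, &rx_chan));
//     ESP_ERROR_CHECK(rmt_enable(rx_chan));
//     rmt_symbol_word_t symbols[64];
//     rmt_receive_config_t recv_cfg = {
//         .signal_range_min_ns = 1250,         // pulses shorter than this are filtered out as glitches
//         .signal_range_max_ns = 12000000,     // a gap longer than this ends the frame (idle threshold)
//     };
//     ESP_ERROR_CHECK(rmt_receive(rx_chan, symbols, sizeof(symbols), &recv_cfg));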
esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_size, const rmt_receive_config_t *config)
{
    ESP_RETURN_ON_FALSE_ISR(channel && buffer && buffer_size && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE_ISR(channel->direction == RMT_CHANNEL_DIRECTION_RX, ESP_ERR_INVALID_ARG, TAG, "invalid channel direction");
    rmt_rx_channel_t *rx_chan = __containerof(channel, rmt_rx_channel_t, base);
    if (channel->dma_chan) {
        ESP_RETURN_ON_FALSE_ISR(esp_ptr_internal(buffer), ESP_ERR_INVALID_ARG, TAG, "buffer must be in internal RAM for DMA use");
#if CONFIG_IDF_TARGET_ESP32P4
        uint32_t data_cache_line_mask = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA) - 1;
        ESP_RETURN_ON_FALSE_ISR(((uintptr_t)buffer & data_cache_line_mask) == 0, ESP_ERR_INVALID_ARG, TAG, "buffer must be aligned to cache line size");
        ESP_RETURN_ON_FALSE_ISR((buffer_size & data_cache_line_mask) == 0, ESP_ERR_INVALID_ARG, TAG, "buffer size must be aligned to cache line size");
#endif
        ESP_RETURN_ON_FALSE_ISR(buffer_size <= rx_chan->num_dma_nodes * RMT_DMA_DESC_BUF_MAX_SIZE,
                                ESP_ERR_INVALID_ARG, TAG, "buffer size exceeds DMA capacity");
    }
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    int channel_id = channel->channel_id;
    uint32_t filter_reg_value = ((uint64_t)group->resolution_hz * config->signal_range_min_ns) / 1000000000UL;
    uint32_t idle_reg_value = ((uint64_t)channel->resolution_hz * config->signal_range_max_ns) / 1000000000UL;
    ESP_RETURN_ON_FALSE_ISR(filter_reg_value <= RMT_LL_MAX_FILTER_VALUE, ESP_ERR_INVALID_ARG, TAG, "signal_range_min_ns too big");
    ESP_RETURN_ON_FALSE_ISR(idle_reg_value <= RMT_LL_MAX_IDLE_VALUE, ESP_ERR_INVALID_ARG, TAG, "signal_range_max_ns too big");

    // check if we're in a proper state to start the receiver
    rmt_fsm_t expected_fsm = RMT_FSM_ENABLE;
    ESP_RETURN_ON_FALSE_ISR(atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_RUN_WAIT),
                            ESP_ERR_INVALID_STATE, TAG, "channel not in enable state");

    // fill in the transaction descriptor
    rmt_rx_trans_desc_t *t = &rx_chan->trans_desc;
    t->buffer = buffer;
    t->buffer_size = buffer_size;
    t->received_symbol_num = 0;
    t->copy_dest_off = 0;
    if (channel->dma_chan) {
#if SOC_RMT_SUPPORT_DMA
        rmt_rx_mount_dma_buffer(rx_chan->dma_nodes, rx_chan->dma_nodes_nc, rx_chan->num_dma_nodes, buffer, buffer_size);
        gdma_reset(channel->dma_chan);
        gdma_start(channel->dma_chan, (intptr_t)rx_chan->dma_nodes); // note, we must use the cached descriptor address to start the DMA
#endif
    }
    rx_chan->mem_off = 0;
    portENTER_CRITICAL_SAFE(&channel->spinlock);
    // reset the memory writer offset
    rmt_ll_rx_reset_pointer(hal->regs, channel_id);
    rmt_ll_rx_set_mem_owner(hal->regs, channel_id, RMT_LL_MEM_OWNER_HW);
    // set the sampling parameters of incoming signals
    rmt_ll_rx_set_filter_thres(hal->regs, channel_id, filter_reg_value);
    rmt_ll_rx_enable_filter(hal->regs, channel_id, config->signal_range_min_ns != 0);
    rmt_ll_rx_set_idle_thres(hal->regs, channel_id, idle_reg_value);
    // turn on the RMT RX machine
    rmt_ll_rx_enable(hal->regs, channel_id, true);
    portEXIT_CRITICAL_SAFE(&channel->spinlock);

    // mark that we're in the running state; this state lasts until the receive is done,
    // i.e. we will switch back to the enable state in the receive-done interrupt handler
    atomic_store(&channel->fsm, RMT_FSM_RUN);
    return ESP_OK;
}
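
// Configure (or disable) carrier demodulation on the RX channel. Passing a NULL config or a
// zero frequency disables demodulation; otherwise the high/low tick counts are derived from
// the channel resolution and the requested duty cycle.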
static esp_err_t rmt_rx_demodulate_carrier(rmt_channel_handle_t channel, const rmt_carrier_config_t *config)
{
#if !SOC_RMT_SUPPORT_RX_DEMODULATION
    ESP_RETURN_ON_FALSE(false, ESP_ERR_NOT_SUPPORTED, TAG, "rx demodulation not supported");
#else
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    int group_id = group->group_id;
    int channel_id = channel->channel_id;
    uint32_t real_frequency = 0;
    if (config && config->frequency_hz) {
        // the carrier demodulation module works based on the channel clock (this is different from the TX carrier modulation mode)
        uint32_t total_ticks = channel->resolution_hz / config->frequency_hz; // Note: this division operation will lose precision
        uint32_t high_ticks = total_ticks * config->duty_cycle;
        uint32_t low_ticks = total_ticks - high_ticks;
        portENTER_CRITICAL(&channel->spinlock);
        rmt_ll_rx_set_carrier_level(hal->regs, channel_id, !config->flags.polarity_active_low);
        rmt_ll_rx_set_carrier_high_low_ticks(hal->regs, channel_id, high_ticks, low_ticks);
        portEXIT_CRITICAL(&channel->spinlock);
        // save the real carrier frequency
        real_frequency = channel->resolution_hz / (high_ticks + low_ticks);
    }
    // enable/disable carrier demodulation
    portENTER_CRITICAL(&channel->spinlock);
    rmt_ll_rx_enable_carrier_demodulation(hal->regs, channel_id, real_frequency > 0);
    portEXIT_CRITICAL(&channel->spinlock);
    if (real_frequency > 0) {
        ESP_LOGD(TAG, "enable carrier demodulation for channel(%d,%d), freq=%"PRIu32"Hz", group_id, channel_id, real_frequency);
    } else {
        ESP_LOGD(TAG, "disable carrier demodulation for channel(%d,%d)", group_id, channel_id);
    }
    return ESP_OK;
#endif
}
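
// Transition the channel from "init" to "enable": take the power management lock, then either
// switch the hardware into DMA access mode and connect the GDMA trigger, or unmask the RX
// interrupt sources for this channel.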
static esp_err_t rmt_rx_enable(rmt_channel_handle_t channel)
{
    // can only enable the channel when it's in the "init" state
    rmt_fsm_t expected_fsm = RMT_FSM_INIT;
    ESP_RETURN_ON_FALSE(atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_ENABLE_WAIT),
                        ESP_ERR_INVALID_STATE, TAG, "channel not in init state");
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    int channel_id = channel->channel_id;
    // acquire power manager lock
    if (channel->pm_lock) {
        esp_pm_lock_acquire(channel->pm_lock);
    }
    if (channel->dma_chan) {
#if SOC_RMT_SUPPORT_DMA
        // enable the DMA access mode
        portENTER_CRITICAL(&channel->spinlock);
        rmt_ll_rx_enable_dma(hal->regs, channel_id, true);
        portEXIT_CRITICAL(&channel->spinlock);
        gdma_connect(channel->dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_RMT, 0));
#endif // SOC_RMT_SUPPORT_DMA
    } else {
        portENTER_CRITICAL(&group->spinlock);
        rmt_ll_enable_interrupt(hal->regs, RMT_LL_EVENT_RX_MASK(channel_id), true);
        portEXIT_CRITICAL(&group->spinlock);
    }
    atomic_store(&channel->fsm, RMT_FSM_ENABLE);
    return ESP_OK;
}
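
// Transition the channel from "enable" or "run" back to "init": stop the RX engine, tear down
// the DMA connection or mask and clear the RX interrupts, then release the power management lock.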
static esp_err_t rmt_rx_disable(rmt_channel_handle_t channel)
{
    // can disable the channel when it's in the `enable` or `run` state
    bool valid_state = false;
    rmt_fsm_t expected_fsm = RMT_FSM_ENABLE;
    if (atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_INIT_WAIT)) {
        valid_state = true;
    }
    expected_fsm = RMT_FSM_RUN;
    if (atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_INIT_WAIT)) {
        valid_state = true;
    }
    ESP_RETURN_ON_FALSE(valid_state, ESP_ERR_INVALID_STATE, TAG, "channel not in enable or run state");
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    int channel_id = channel->channel_id;

    portENTER_CRITICAL(&channel->spinlock);
    rmt_ll_rx_enable(hal->regs, channel_id, false);
    portEXIT_CRITICAL(&channel->spinlock);
    if (channel->dma_chan) {
#if SOC_RMT_SUPPORT_DMA
        gdma_stop(channel->dma_chan);
        gdma_disconnect(channel->dma_chan);
        portENTER_CRITICAL(&channel->spinlock);
        rmt_ll_rx_enable_dma(hal->regs, channel_id, false);
        portEXIT_CRITICAL(&channel->spinlock);
#endif
    } else {
        portENTER_CRITICAL(&group->spinlock);
        rmt_ll_enable_interrupt(hal->regs, RMT_LL_EVENT_RX_MASK(channel_id), false);
        rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_RX_MASK(channel_id));
        portEXIT_CRITICAL(&group->spinlock);
    }
    // release power manager lock
    if (channel->pm_lock) {
        esp_pm_lock_release(channel->pm_lock);
    }
    // now we can switch the state to init
    atomic_store(&channel->fsm, RMT_FSM_INIT);
    return ESP_OK;
}
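
// Copy up to `symbol_num` RMT symbols into the user buffer at `offset`, clamped to the
// remaining buffer space. Returns the number of bytes actually copied, so the caller can
// detect truncation.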
static size_t IRAM_ATTR rmt_copy_symbols(rmt_symbol_word_t *symbol_stream, size_t symbol_num, void *buffer, size_t offset, size_t buffer_size)
{
    size_t mem_want = symbol_num * sizeof(rmt_symbol_word_t);
    size_t mem_have = buffer_size - offset;
    size_t copy_size = MIN(mem_want, mem_have);
    // do memory copy
    memcpy(buffer + offset, symbol_stream, copy_size);
    return copy_size;
}
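
// Handle the "RX done" event: stop the receiver, copy the remaining symbols out of the
// hardware memory, report truncation (user buffer too small, or hardware buffer overflow on
// chips without ping-pong), flip the FSM back to "enable" and invoke the user callback.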
static bool IRAM_ATTR rmt_isr_handle_rx_done(rmt_rx_channel_t *rx_chan)
{
    rmt_channel_t *channel = &rx_chan->base;
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    uint32_t channel_id = channel->channel_id;
    rmt_rx_trans_desc_t *trans_desc = &rx_chan->trans_desc;
    bool need_yield = false;

    rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_RX_DONE(channel_id));

    portENTER_CRITICAL_ISR(&channel->spinlock);
    // disable the RX engine, it will be enabled again the next time the user calls `rmt_receive()`
    rmt_ll_rx_enable(hal->regs, channel_id, false);
    uint32_t offset = rmt_ll_rx_get_memory_writer_offset(hal->regs, channel_id);
    // sanity check
    assert(offset >= rx_chan->mem_off);
    rmt_ll_rx_set_mem_owner(hal->regs, channel_id, RMT_LL_MEM_OWNER_SW);
    // copy the symbols to user space
    size_t stream_symbols = offset - rx_chan->mem_off;
    size_t copy_size = rmt_copy_symbols(channel->hw_mem_base + rx_chan->mem_off, stream_symbols,
                                        trans_desc->buffer, trans_desc->copy_dest_off, trans_desc->buffer_size);
    rmt_ll_rx_set_mem_owner(hal->regs, channel_id, RMT_LL_MEM_OWNER_HW);
    portEXIT_CRITICAL_ISR(&channel->spinlock);

#if !SOC_RMT_SUPPORT_RX_PINGPONG
    // for chips that don't support ping-pong RX, we should check whether the receiver has encountered
    // a long frame, i.e. one whose length exceeds the channel's memory capacity
    if (rmt_ll_rx_get_interrupt_status_raw(hal->regs, channel_id) & RMT_LL_EVENT_RX_ERROR(channel_id)) {
        portENTER_CRITICAL_ISR(&channel->spinlock);
        rmt_ll_rx_reset_pointer(hal->regs, channel_id);
        portEXIT_CRITICAL_ISR(&channel->spinlock);
        // this clear operation can only take effect after we copy out the received data and reset the pointer
        rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_RX_ERROR(channel_id));
        ESP_DRAM_LOGE(TAG, "hw buffer too small, received symbols truncated");
    }
#endif // !SOC_RMT_SUPPORT_RX_PINGPONG

    // check whether all symbols were copied
    if (copy_size != stream_symbols * sizeof(rmt_symbol_word_t)) {
        ESP_DRAM_LOGE(TAG, "user buffer too small, received symbols truncated");
    }
    trans_desc->copy_dest_off += copy_size;
    trans_desc->received_symbol_num += copy_size / sizeof(rmt_symbol_word_t);
    // switch back to the enable state, then the user can call `rmt_receive` to start a new receive
    atomic_store(&channel->fsm, RMT_FSM_ENABLE);
    // notify the user of the received RMT symbols
    if (rx_chan->on_recv_done) {
        rmt_rx_done_event_data_t edata = {
            .received_symbols = trans_desc->buffer,
            .num_symbols = trans_desc->received_symbol_num,
        };
        if (rx_chan->on_recv_done(channel, &edata, rx_chan->user_data)) {
            need_yield = true;
        }
    }
    return need_yield;
}

#if SOC_RMT_SUPPORT_RX_PINGPONG
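// Handle the "RX threshold" (ping-pong) event: one half of the channel memory is full, so copy
// it into the user buffer and flip `mem_off` to point at the other half while the hardware
// keeps writing.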
static bool IRAM_ATTR rmt_isr_handle_rx_threshold(rmt_rx_channel_t *rx_chan)
{
    rmt_channel_t *channel = &rx_chan->base;
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    uint32_t channel_id = channel->channel_id;
    rmt_rx_trans_desc_t *trans_desc = &rx_chan->trans_desc;

    rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_RX_THRES(channel_id));

    portENTER_CRITICAL_ISR(&channel->spinlock);
    rmt_ll_rx_set_mem_owner(hal->regs, channel_id, RMT_LL_MEM_OWNER_SW);
    // copy the symbols to user space
    size_t copy_size = rmt_copy_symbols(channel->hw_mem_base + rx_chan->mem_off, rx_chan->ping_pong_symbols,
                                        trans_desc->buffer, trans_desc->copy_dest_off, trans_desc->buffer_size);
    rmt_ll_rx_set_mem_owner(hal->regs, channel_id, RMT_LL_MEM_OWNER_HW);
    portEXIT_CRITICAL_ISR(&channel->spinlock);

    // check whether all symbols were copied
    if (copy_size != rx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t)) {
        ESP_DRAM_LOGE(TAG, "received symbols truncated");
    }
    trans_desc->copy_dest_off += copy_size;
    trans_desc->received_symbol_num += copy_size / sizeof(rmt_symbol_word_t);
    // update the hw memory offset, which points to where the next RMT symbols will be copied from
    rx_chan->mem_off = rx_chan->ping_pong_symbols - rx_chan->mem_off;
    return false;
}
#endif // SOC_RMT_SUPPORT_RX_PINGPONG
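
// Default RX ISR (non-DMA mode): dispatch the threshold (ping-pong) and done events for this
// channel, and request a context switch if any callback woke a higher priority task.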
static void IRAM_ATTR rmt_rx_default_isr(void *args)
{
    rmt_rx_channel_t *rx_chan = (rmt_rx_channel_t *)args;
    rmt_channel_t *channel = &rx_chan->base;
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    uint32_t channel_id = channel->channel_id;
    bool need_yield = false;
    uint32_t status = rmt_ll_rx_get_interrupt_status(hal->regs, channel_id);

#if SOC_RMT_SUPPORT_RX_PINGPONG
    // RX threshold interrupt
    if (status & RMT_LL_EVENT_RX_THRES(channel_id)) {
        if (rmt_isr_handle_rx_threshold(rx_chan)) {
            need_yield = true;
        }
    }
#endif // SOC_RMT_SUPPORT_RX_PINGPONG

    // RX end interrupt
    if (status & RMT_LL_EVENT_RX_DONE(channel_id)) {
        if (rmt_isr_handle_rx_done(rx_chan)) {
            need_yield = true;
        }
    }
    if (need_yield) {
        portYIELD_FROM_ISR();
    }
}

#if SOC_RMT_SUPPORT_DMA
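// Walk the DMA descriptor chain (through its non-cached alias) and sum the bytes the DMA
// actually wrote, then convert that to a symbol count, rounding up to a whole symbol.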
static size_t IRAM_ATTR rmt_rx_get_received_symbol_num_from_dma(rmt_dma_descriptor_t *desc_nc)
{
    size_t received_bytes = 0;
    while (desc_nc) {
        received_bytes += desc_nc->dw0.length;
        desc_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(desc_nc->next);
    }
    received_bytes = ALIGN_UP(received_bytes, sizeof(rmt_symbol_word_t));
    return received_bytes / sizeof(rmt_symbol_word_t);
}
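
// GDMA EOF callback (DMA mode): the in_suc_eof event marks the end of a received frame.
// Stop the receiver, invalidate the cache over the destination buffer on ESP32-P4 so the CPU
// sees the data the DMA wrote, move the FSM back to "enable" and notify the user.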
static bool IRAM_ATTR rmt_dma_rx_eof_cb(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
{
    bool need_yield = false;
    rmt_rx_channel_t *rx_chan = (rmt_rx_channel_t *)user_data;
    rmt_channel_t *channel = &rx_chan->base;
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    rmt_rx_trans_desc_t *trans_desc = &rx_chan->trans_desc;
    uint32_t channel_id = channel->channel_id;

    portENTER_CRITICAL_ISR(&channel->spinlock);
    // disable the RX engine, it will be enabled again in the next `rmt_receive()`
    rmt_ll_rx_enable(hal->regs, channel_id, false);
    portEXIT_CRITICAL_ISR(&channel->spinlock);

#if CONFIG_IDF_TARGET_ESP32P4
    int invalidate_map = CACHE_MAP_L1_DCACHE;
    if (esp_ptr_external_ram((const void *)trans_desc->buffer)) {
        invalidate_map |= CACHE_MAP_L2_CACHE;
    }
    Cache_Invalidate_Addr(invalidate_map, (uint32_t)trans_desc->buffer, trans_desc->buffer_size);
#endif

    // switch back to the enable state, then the user can call `rmt_receive` to start a new receive
    atomic_store(&channel->fsm, RMT_FSM_ENABLE);
    if (rx_chan->on_recv_done) {
        rmt_rx_done_event_data_t edata = {
            .received_symbols = trans_desc->buffer,
            .num_symbols = rmt_rx_get_received_symbol_num_from_dma(rx_chan->dma_nodes_nc),
        };
        if (rx_chan->on_recv_done(channel, &edata, rx_chan->user_data)) {
            need_yield = true;
        }
    }
    return need_yield;
}
#endif // SOC_RMT_SUPPORT_DMA