i2s_common.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151
  1. /*
  2. * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include <string.h>
  7. #include <stdbool.h>
  8. #include "freertos/FreeRTOS.h"
  9. #include "freertos/queue.h"
  10. #include "freertos/task.h"
  11. #include "sdkconfig.h"
  12. #if CONFIG_I2S_ENABLE_DEBUG_LOG
  13. // The local log level must be defined before including esp_log.h
  14. // Set the maximum log level for this source file
  15. #define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
  16. #endif
  17. #include "esp_log.h"
  18. #include "soc/i2s_periph.h"
  19. #include "soc/soc_caps.h"
  20. #include "hal/gpio_hal.h"
  21. #include "hal/i2s_hal.h"
  22. #if SOC_I2S_SUPPORTS_ADC_DAC
  23. #include "hal/adc_ll.h"
  24. #include "driver/adc_i2s_legacy.h"
  25. #endif
  26. #if SOC_I2S_SUPPORTS_APLL
  27. #include "clk_ctrl_os.h"
  28. #endif
  29. #include "esp_private/i2s_platform.h"
  30. #include "esp_private/periph_ctrl.h"
  31. #include "driver/gpio.h"
  32. #include "driver/i2s_common.h"
  33. #include "i2s_private.h"
  34. #include "clk_ctrl_os.h"
  35. #include "esp_intr_alloc.h"
  36. #include "esp_check.h"
  37. #include "esp_attr.h"
  38. #include "esp_rom_gpio.h"
  39. #include "esp_memory_utils.h"
  40. /* The actual max size of DMA buffer is 4095
  41. * Set 4092 here to align with 4-byte, so that the position of the slot data in the buffer will be relatively fixed */
  42. #define I2S_DMA_BUFFER_MAX_SIZE (4092)
  43. // If ISR handler is allowed to run whilst cache is disabled,
  44. // Make sure all the code and related variables used by the handler are in the SRAM
  45. #if CONFIG_I2S_ISR_IRAM_SAFE
  46. #define I2S_INTR_ALLOC_FLAGS (ESP_INTR_FLAG_IRAM | ESP_INTR_FLAG_INTRDISABLED | ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_LOWMED)
  47. #define I2S_MEM_ALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT)
  48. #else
  49. #define I2S_INTR_ALLOC_FLAGS (ESP_INTR_FLAG_INTRDISABLED | ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_LOWMED)
  50. #define I2S_MEM_ALLOC_CAPS MALLOC_CAP_DEFAULT
  51. #endif //CONFIG_I2S_ISR_IRAM_SAFE
  52. #define I2S_DMA_ALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA)
/**
 * @brief Global i2s platform object
 * @note For saving all the I2S related information
 */
i2s_platform_t g_i2s = {
    .spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED,
    .controller[0 ... (SOC_I2S_NUM - 1)] = NULL, // groups will be lazy installed
    .comp_name[0 ... (SOC_I2S_NUM - 1)] = NULL,  // name of the component occupying each port, if any
};

// Logging tag shared by every message in this file
static const char *TAG = "i2s_common";
  63. /*---------------------------------------------------------------------------
  64. I2S Static APIs
  65. ----------------------------------------------------------------------------
  66. Scope: This file only
  67. ----------------------------------------------------------------------------*/
  68. static void i2s_tx_channel_start(i2s_chan_handle_t handle)
  69. {
  70. i2s_hal_tx_reset(&(handle->controller->hal));
  71. #if SOC_GDMA_SUPPORTED
  72. gdma_reset((handle->dma.dma_chan));
  73. #else
  74. i2s_hal_tx_reset_dma(&(handle->controller->hal));
  75. #endif
  76. i2s_hal_tx_reset_fifo(&(handle->controller->hal));
  77. #if SOC_GDMA_SUPPORTED
  78. gdma_start((handle->dma.dma_chan), (uint32_t) handle->dma.desc[0]);
  79. #else
  80. esp_intr_enable(handle->dma.dma_chan);
  81. i2s_hal_tx_enable_intr(&(handle->controller->hal));
  82. i2s_hal_tx_enable_dma(&(handle->controller->hal));
  83. i2s_hal_tx_start_link(&(handle->controller->hal), (uint32_t) handle->dma.desc[0]);
  84. #endif
  85. i2s_hal_tx_start(&(handle->controller->hal));
  86. }
  87. static void i2s_rx_channel_start(i2s_chan_handle_t handle)
  88. {
  89. i2s_hal_rx_reset(&(handle->controller->hal));
  90. #if SOC_GDMA_SUPPORTED
  91. gdma_reset(handle->dma.dma_chan);
  92. #else
  93. i2s_hal_rx_reset_dma(&(handle->controller->hal));
  94. #endif
  95. i2s_hal_rx_reset_fifo(&(handle->controller->hal));
  96. #if SOC_GDMA_SUPPORTED
  97. gdma_start(handle->dma.dma_chan, (uint32_t) handle->dma.desc[0]);
  98. #else
  99. esp_intr_enable(handle->dma.dma_chan);
  100. i2s_hal_rx_enable_intr(&(handle->controller->hal));
  101. i2s_hal_rx_enable_dma(&(handle->controller->hal));
  102. i2s_hal_rx_start_link(&(handle->controller->hal), (uint32_t) handle->dma.desc[0]);
  103. #endif
  104. i2s_hal_rx_start(&(handle->controller->hal));
  105. }
  106. static void i2s_tx_channel_stop(i2s_chan_handle_t handle)
  107. {
  108. i2s_hal_tx_stop(&(handle->controller->hal));
  109. #if SOC_GDMA_SUPPORTED
  110. gdma_stop(handle->dma.dma_chan);
  111. #else
  112. i2s_hal_tx_stop_link(&(handle->controller->hal));
  113. i2s_hal_tx_disable_intr(&(handle->controller->hal));
  114. i2s_hal_tx_disable_dma(&(handle->controller->hal));
  115. esp_intr_disable(handle->dma.dma_chan);
  116. #endif
  117. }
  118. static void i2s_rx_channel_stop(i2s_chan_handle_t handle)
  119. {
  120. i2s_hal_rx_stop(&(handle->controller->hal));
  121. #if SOC_GDMA_SUPPORTED
  122. gdma_stop(handle->dma.dma_chan);
  123. #else
  124. i2s_hal_rx_stop_link(&(handle->controller->hal));
  125. i2s_hal_rx_disable_intr(&(handle->controller->hal));
  126. i2s_hal_rx_disable_dma(&(handle->controller->hal));
  127. esp_intr_disable(handle->dma.dma_chan);
  128. #endif
  129. }
  130. static esp_err_t i2s_destroy_controller_obj(i2s_controller_t **i2s_obj)
  131. {
  132. I2S_NULL_POINTER_CHECK(TAG, i2s_obj);
  133. I2S_NULL_POINTER_CHECK(TAG, *i2s_obj);
  134. ESP_RETURN_ON_FALSE(!(*i2s_obj)->rx_chan && !(*i2s_obj)->tx_chan,
  135. ESP_ERR_INVALID_STATE, TAG,
  136. "there still have channels under this i2s controller");
  137. int id = (*i2s_obj)->id;
  138. #if SOC_I2S_HW_VERSION_1
  139. i2s_ll_enable_dma((*i2s_obj)->hal.dev, false);
  140. #endif
  141. free(*i2s_obj);
  142. *i2s_obj = NULL;
  143. return i2s_platform_release_occupation(id);
  144. }
/**
 * @brief Acquire i2s controller object
 *
 * @param id i2s port id, must be in range [0, SOC_I2S_NUM)
 * @return
 *      - pointer of the acquired (or already installed) i2s controller object
 *      - NULL if the id is invalid, memory is exhausted, or the port is
 *        occupied by another component
 */
static i2s_controller_t *i2s_acquire_controller_obj(int id)
{
    if (id < 0 || id >= SOC_I2S_NUM) {
        return NULL;
    }
    /* pre-alloc controller object */
    i2s_controller_t *pre_alloc = (i2s_controller_t *)heap_caps_calloc(1, sizeof(i2s_controller_t), I2S_MEM_ALLOC_CAPS);
    if (pre_alloc == NULL) {
        return NULL;
    }
    pre_alloc->id = id;
    i2s_hal_init(&pre_alloc->hal, id);
    pre_alloc->full_duplex = false;
    pre_alloc->tx_chan = NULL;
    pre_alloc->rx_chan = NULL;
    pre_alloc->mclk = I2S_GPIO_UNUSED;
    i2s_controller_t *i2s_obj = NULL;
    /* Try to occupy this i2s controller */
    if (i2s_platform_acquire_occupation(id, "i2s_driver") == ESP_OK) {
        portENTER_CRITICAL(&g_i2s.spinlock);
        i2s_obj = pre_alloc;
        g_i2s.controller[id] = i2s_obj;
        portEXIT_CRITICAL(&g_i2s.spinlock);
#if SOC_I2S_SUPPORTS_ADC_DAC
        if (id == I2S_NUM_0) {
            /* Default the I2S0 digital input to the IO signals (not the internal ADC) */
            adc_ll_digi_set_data_source(ADC_I2S_DATA_SRC_IO_SIG);
        }
#endif
    } else {
        /* Port already occupied: drop the pre-allocated object; if this driver
         * installed the controller earlier, reuse that existing object */
        free(pre_alloc);
        portENTER_CRITICAL(&g_i2s.spinlock);
        if (g_i2s.controller[id]) {
            i2s_obj = g_i2s.controller[id];
        }
        portEXIT_CRITICAL(&g_i2s.spinlock);
        if (i2s_obj == NULL) {
            /* Occupied but not by this driver — some other component owns it */
            ESP_LOGE(TAG, "i2s%d might be occupied by other component", id);
        }
    }
    return i2s_obj;
}
  196. static inline bool i2s_take_available_channel(i2s_controller_t *i2s_obj, uint8_t chan_search_mask)
  197. {
  198. bool is_available = false;
  199. #if SOC_I2S_HW_VERSION_1
  200. /* In ESP32 and ESP32-S2, tx channel and rx channel are not totally separated
  201. * Take both two channels in case one channel can affect another
  202. */
  203. chan_search_mask = I2S_DIR_RX | I2S_DIR_TX;
  204. #endif
  205. portENTER_CRITICAL(&g_i2s.spinlock);
  206. if (!(chan_search_mask & i2s_obj->chan_occupancy)) {
  207. i2s_obj->chan_occupancy |= chan_search_mask;
  208. is_available = true;
  209. }
  210. portEXIT_CRITICAL(&g_i2s.spinlock);
  211. return is_available;
  212. }
/**
 * @brief Allocate a channel object and register it on an i2s controller.
 *
 * Creates the channel struct plus its message queue, mutex and binary
 * semaphore, then installs it as the controller's tx or rx channel
 * (replacing and deleting any previously registered channel of that
 * direction).
 *
 * @param i2s_obj  i2s controller the channel belongs to
 * @param dir      channel direction, I2S_DIR_TX or I2S_DIR_RX
 * @param desc_num number of DMA descriptors; the message queue holds
 *                 desc_num - 1 entries (NOTE(review): presumably because one
 *                 descriptor is always owned by the DMA engine — confirm)
 * @return
 *      - ESP_OK         on success
 *      - ESP_ERR_NO_MEM if any allocation fails (all partial resources freed)
 */
static esp_err_t i2s_register_channel(i2s_controller_t *i2s_obj, i2s_dir_t dir, uint32_t desc_num)
{
    I2S_NULL_POINTER_CHECK(TAG, i2s_obj);
    esp_err_t ret = ESP_OK;
    i2s_chan_handle_t new_chan = (i2s_chan_handle_t)heap_caps_calloc(1, sizeof(struct i2s_channel_t), I2S_MEM_ALLOC_CAPS);
    ESP_RETURN_ON_FALSE(new_chan, ESP_ERR_NO_MEM, TAG, "No memory for new channel");
    new_chan->mode = I2S_COMM_MODE_NONE;
    new_chan->role = I2S_ROLE_MASTER; // Set default role to master
    new_chan->dir = dir;
    new_chan->state = I2S_CHAN_STATE_REGISTER;
#if SOC_I2S_SUPPORTS_APLL
    new_chan->apll_en = false;
#endif
    new_chan->mode_info = NULL;
    new_chan->controller = i2s_obj;
#if CONFIG_PM_ENABLE
    new_chan->pm_lock = NULL; // Init in i2s_set_clock according to clock source
#endif
#if CONFIG_I2S_ISR_IRAM_SAFE
    /* IRAM-safe build: allocate queue/semaphore storage explicitly so the
     * FreeRTOS objects live in memory the ISR can reach with cache disabled */
    new_chan->msg_que_storage = (uint8_t *)heap_caps_calloc(desc_num - 1, sizeof(uint8_t *), I2S_MEM_ALLOC_CAPS);
    ESP_GOTO_ON_FALSE(new_chan->msg_que_storage, ESP_ERR_NO_MEM, err, TAG, "No memory for message queue storage");
    new_chan->msg_que_struct = (StaticQueue_t *)heap_caps_calloc(1, sizeof(StaticQueue_t), I2S_MEM_ALLOC_CAPS);
    ESP_GOTO_ON_FALSE(new_chan->msg_que_struct, ESP_ERR_NO_MEM, err, TAG, "No memory for message queue struct");
    new_chan->msg_queue = xQueueCreateStatic(desc_num - 1, sizeof(uint8_t *), new_chan->msg_que_storage, new_chan->msg_que_struct);
    ESP_GOTO_ON_FALSE(new_chan->msg_queue, ESP_ERR_NO_MEM, err, TAG, "No memory for message queue");
    new_chan->mutex_struct = (StaticSemaphore_t *)heap_caps_calloc(1, sizeof(StaticSemaphore_t), I2S_MEM_ALLOC_CAPS);
    ESP_GOTO_ON_FALSE(new_chan->mutex_struct, ESP_ERR_NO_MEM, err, TAG, "No memory for mutex struct");
    new_chan->mutex = xSemaphoreCreateMutexStatic(new_chan->mutex_struct);
    ESP_GOTO_ON_FALSE(new_chan->mutex, ESP_ERR_NO_MEM, err, TAG, "No memory for mutex");
    new_chan->binary_struct = (StaticSemaphore_t *)heap_caps_calloc(1, sizeof(StaticSemaphore_t), I2S_MEM_ALLOC_CAPS);
    ESP_GOTO_ON_FALSE(new_chan->binary_struct, ESP_ERR_NO_MEM, err, TAG, "No memory for binary struct");
    new_chan->binary = xSemaphoreCreateBinaryStatic(new_chan->binary_struct);
    ESP_GOTO_ON_FALSE(new_chan->binary, ESP_ERR_NO_MEM, err, TAG, "No memory for binary");
#else
    /* Regular build: let FreeRTOS allocate the objects from the heap */
    new_chan->msg_queue = xQueueCreate(desc_num - 1, sizeof(uint8_t *));
    ESP_GOTO_ON_FALSE(new_chan->msg_queue, ESP_ERR_NO_MEM, err, TAG, "No memory for message queue");
    new_chan->mutex = xSemaphoreCreateMutex();
    ESP_GOTO_ON_FALSE(new_chan->mutex, ESP_ERR_NO_MEM, err, TAG, "No memory for mutex semaphore");
    new_chan->binary = xSemaphoreCreateBinary();
    ESP_GOTO_ON_FALSE(new_chan->binary, ESP_ERR_NO_MEM, err, TAG, "No memory for binary semaphore");
#endif
    /* No user callbacks and no mode-specific start/stop hooks yet;
     * they are installed later by the mode-init / callback-register APIs */
    new_chan->callbacks.on_recv = NULL;
    new_chan->callbacks.on_recv_q_ovf = NULL;
    new_chan->callbacks.on_sent = NULL;
    new_chan->callbacks.on_send_q_ovf = NULL;
    new_chan->start = NULL;
    new_chan->stop = NULL;
    if (dir == I2S_DIR_TX) {
        /* Replace (and delete) any previously registered tx channel */
        if (i2s_obj->tx_chan) {
            i2s_del_channel(i2s_obj->tx_chan);
        }
        i2s_obj->tx_chan = new_chan;
    } else {
        /* Replace (and delete) any previously registered rx channel */
        if (i2s_obj->rx_chan) {
            i2s_del_channel(i2s_obj->rx_chan);
        }
        i2s_obj->rx_chan = new_chan;
    }
    return ret;
err:
    /* Roll back every resource created before the failure */
#if CONFIG_I2S_ISR_IRAM_SAFE
    if (new_chan->msg_que_storage) {
        free(new_chan->msg_que_storage);
    }
    if (new_chan->msg_que_struct) {
        free(new_chan->msg_que_struct);
    }
    if (new_chan->mutex_struct) {
        free(new_chan->mutex_struct);
    }
    if (new_chan->binary_struct) {
        free(new_chan->binary_struct);
    }
#endif
    if (new_chan->msg_queue) {
        vQueueDelete(new_chan->msg_queue);
    }
    if (new_chan->mutex) {
        vSemaphoreDelete(new_chan->mutex);
    }
    if (new_chan->binary) {
        vSemaphoreDelete(new_chan->binary);
    }
    free(new_chan);
    return ret;
}
/**
 * @brief Register user event callbacks on a channel.
 *
 * Copies the whole callback table into the channel and stores the user
 * context. Only allowed before the channel is enabled (state below
 * I2S_CHAN_STATE_RUNNING).
 *
 * @param handle    i2s channel handle
 * @param callbacks callback table to copy (must not be NULL)
 * @param user_data opaque pointer passed back to every callback
 * @return
 *      - ESP_OK                on success
 *      - ESP_ERR_INVALID_ARG   callbacks/user_data fail the IRAM/internal-RAM
 *                              checks (IRAM-safe build only)
 *      - ESP_ERR_INVALID_STATE channel already enabled
 */
esp_err_t i2s_channel_register_event_callback(i2s_chan_handle_t handle, const i2s_event_callbacks_t *callbacks, void *user_data)
{
    I2S_NULL_POINTER_CHECK(TAG, handle);
    I2S_NULL_POINTER_CHECK(TAG, callbacks);
    esp_err_t ret = ESP_OK;
#if CONFIG_I2S_ISR_IRAM_SAFE
    /* The callbacks run from an ISR that may fire while cache is disabled,
     * so every callback must live in IRAM and the context in internal RAM */
    if (callbacks->on_recv) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(callbacks->on_recv), ESP_ERR_INVALID_ARG, TAG, "on_recv callback not in IRAM");
    }
    if (callbacks->on_recv_q_ovf) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(callbacks->on_recv_q_ovf), ESP_ERR_INVALID_ARG, TAG, "on_recv_q_ovf callback not in IRAM");
    }
    if (callbacks->on_sent) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(callbacks->on_sent), ESP_ERR_INVALID_ARG, TAG, "on_sent callback not in IRAM");
    }
    if (callbacks->on_send_q_ovf) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(callbacks->on_send_q_ovf), ESP_ERR_INVALID_ARG, TAG, "on_send_q_ovf callback not in IRAM");
    }
    if (user_data) {
        ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG, TAG, "user context not in internal RAM");
    }
#endif
    /* Take the channel mutex so the table is not swapped mid-operation */
    xSemaphoreTake(handle->mutex, portMAX_DELAY);
    ESP_GOTO_ON_FALSE(handle->state < I2S_CHAN_STATE_RUNNING, ESP_ERR_INVALID_STATE, err, TAG, "invalid state, I2S has enabled");
    memcpy(&(handle->callbacks), callbacks, sizeof(i2s_event_callbacks_t));
    handle->user_data = user_data;
err:
    xSemaphoreGive(handle->mutex);
    return ret;
}
  329. uint32_t i2s_get_buf_size(i2s_chan_handle_t handle, uint32_t data_bit_width, uint32_t dma_frame_num)
  330. {
  331. uint32_t active_chan = handle->active_slot;
  332. uint32_t bytes_per_sample = ((data_bit_width + 15) / 16) * 2;
  333. uint32_t bytes_per_frame = bytes_per_sample * active_chan;
  334. uint32_t bufsize = dma_frame_num * bytes_per_frame;
  335. /* Limit DMA buffer size if it is out of range (DMA buffer limitation is 4092 bytes) */
  336. if (bufsize > I2S_DMA_BUFFER_MAX_SIZE) {
  337. uint32_t frame_num = I2S_DMA_BUFFER_MAX_SIZE / bytes_per_frame;
  338. bufsize = frame_num * bytes_per_frame;
  339. ESP_LOGW(TAG, "dma frame num is out of dma buffer size, limited to %"PRIu32, frame_num);
  340. }
  341. return bufsize;
  342. }
  343. esp_err_t i2s_free_dma_desc(i2s_chan_handle_t handle)
  344. {
  345. I2S_NULL_POINTER_CHECK(TAG, handle);
  346. if (!handle->dma.desc) {
  347. return ESP_OK;
  348. }
  349. for (int i = 0; i < handle->dma.desc_num; i++) {
  350. if (handle->dma.bufs[i]) {
  351. free(handle->dma.bufs[i]);
  352. }
  353. if (handle->dma.desc[i]) {
  354. free(handle->dma.desc[i]);
  355. }
  356. }
  357. if (handle->dma.bufs) {
  358. free(handle->dma.bufs);
  359. }
  360. if (handle->dma.desc) {
  361. free(handle->dma.desc);
  362. }
  363. handle->dma.desc = NULL;
  364. return ESP_OK;
  365. }
  366. esp_err_t i2s_alloc_dma_desc(i2s_chan_handle_t handle, uint32_t num, uint32_t bufsize)
  367. {
  368. I2S_NULL_POINTER_CHECK(TAG, handle);
  369. esp_err_t ret = ESP_OK;
  370. ESP_RETURN_ON_FALSE(bufsize <= I2S_DMA_BUFFER_MAX_SIZE, ESP_ERR_INVALID_ARG, TAG, "dma buffer can't be bigger than %d", I2S_DMA_BUFFER_MAX_SIZE);
  371. handle->dma.desc_num = num;
  372. handle->dma.buf_size = bufsize;
  373. /* Descriptors must be in the internal RAM */
  374. handle->dma.desc = (lldesc_t **)heap_caps_calloc(num, sizeof(lldesc_t *), I2S_MEM_ALLOC_CAPS);
  375. ESP_GOTO_ON_FALSE(handle->dma.desc, ESP_ERR_NO_MEM, err, TAG, "create I2S DMA decriptor array failed");
  376. handle->dma.bufs = (uint8_t **)heap_caps_calloc(num, sizeof(uint8_t *), I2S_MEM_ALLOC_CAPS);
  377. for (int i = 0; i < num; i++) {
  378. /* Allocate DMA descriptor */
  379. handle->dma.desc[i] = (lldesc_t *) heap_caps_calloc(1, sizeof(lldesc_t), I2S_DMA_ALLOC_CAPS);
  380. ESP_GOTO_ON_FALSE(handle->dma.desc[i], ESP_ERR_NO_MEM, err, TAG, "allocate DMA description failed");
  381. handle->dma.desc[i]->owner = 1;
  382. handle->dma.desc[i]->eof = 1;
  383. handle->dma.desc[i]->sosf = 0;
  384. handle->dma.desc[i]->length = bufsize;
  385. handle->dma.desc[i]->size = bufsize;
  386. handle->dma.desc[i]->offset = 0;
  387. handle->dma.bufs[i] = (uint8_t *) heap_caps_calloc(1, bufsize * sizeof(uint8_t), I2S_DMA_ALLOC_CAPS);
  388. handle->dma.desc[i]->buf = handle->dma.bufs[i];
  389. ESP_GOTO_ON_FALSE(handle->dma.desc[i]->buf, ESP_ERR_NO_MEM, err, TAG, "allocate DMA buffer failed");
  390. ESP_LOGV(TAG, "desc addr: %8p\tbuffer addr:%8p", handle->dma.desc[i], handle->dma.bufs[i]);
  391. }
  392. /* Connect DMA descriptor as a circle */
  393. for (int i = 0; i < num; i++) {
  394. /* Link to the next descriptor */
  395. handle->dma.desc[i]->empty = (uint32_t)((i < (num - 1)) ? (handle->dma.desc[i + 1]) : handle->dma.desc[0]);
  396. }
  397. if (handle->dir == I2S_DIR_RX) {
  398. i2s_ll_rx_set_eof_num(handle->controller->hal.dev, bufsize);
  399. }
  400. ESP_LOGD(TAG, "DMA malloc info: dma_desc_num = %"PRIu32", dma_desc_buf_size = dma_frame_num * slot_num * data_bit_width = %"PRIu32, num, bufsize);
  401. return ESP_OK;
  402. err:
  403. i2s_free_dma_desc(handle);
  404. return ret;
  405. }
  406. #if SOC_I2S_SUPPORTS_APLL
  407. uint32_t i2s_set_get_apll_freq(uint32_t mclk_freq_hz)
  408. {
  409. /* Calculate the expected APLL */
  410. int mclk_div = (int)((SOC_APLL_MIN_HZ / mclk_freq_hz) + 1);
  411. /* apll_freq = mclk * div
  412. * when div = 1, hardware will still divide 2
  413. * when div = 0, the final mclk will be unpredictable
  414. * So the div here should be at least 2 */
  415. mclk_div = mclk_div < 2 ? 2 : mclk_div;
  416. uint32_t expt_freq = mclk_freq_hz * mclk_div;
  417. if (expt_freq > SOC_APLL_MAX_HZ) {
  418. ESP_LOGE(TAG, "The required APLL frequecy exceed its maximum value");
  419. return 0;
  420. }
  421. uint32_t real_freq = 0;
  422. esp_err_t ret = periph_rtc_apll_freq_set(expt_freq, &real_freq);
  423. if (ret == ESP_ERR_INVALID_ARG) {
  424. ESP_LOGE(TAG, "set APLL freq failed due to invalid argument");
  425. return 0;
  426. }
  427. if (ret == ESP_ERR_INVALID_STATE) {
  428. ESP_LOGW(TAG, "APLL is occupied already, it is working at %"PRIu32" Hz while the expected frequency is %"PRIu32" Hz", real_freq, expt_freq);
  429. ESP_LOGW(TAG, "Trying to work at %"PRIu32" Hz...", real_freq);
  430. }
  431. ESP_LOGD(TAG, "APLL expected frequency is %"PRIu32" Hz, real frequency is %"PRIu32" Hz", expt_freq, real_freq);
  432. return real_freq;
  433. }
  434. #endif
  435. #if SOC_GDMA_SUPPORTED
/**
 * @brief GDMA rx-EOF ISR callback: publish the just-filled buffer to the
 *        reader task via the message queue and fire user callbacks.
 *
 * @param dma_chan   GDMA channel (unused here)
 * @param event_data carries the address of the finished rx descriptor
 * @param user_data  the i2s channel handle
 * @return true if a higher-priority task was woken and a yield is needed
 */
static bool IRAM_ATTR i2s_dma_rx_callback(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
{
    i2s_chan_handle_t handle = (i2s_chan_handle_t)user_data;
    portBASE_TYPE need_yield1 = 0;
    portBASE_TYPE need_yield2 = 0;
    portBASE_TYPE user_need_yield = 0;
    lldesc_t *finish_desc;
    uint32_t dummy;
    /* Descriptor that has just finished receiving */
    finish_desc = (lldesc_t *)event_data->rx_eof_desc_addr;
    i2s_event_data_t evt = {
        .data = &(finish_desc->buf),  // address of the descriptor's buffer pointer field
        .size = handle->dma.buf_size,
    };
    if (handle->callbacks.on_recv) {
        user_need_yield |= handle->callbacks.on_recv(handle, &evt, handle->user_data);
    }
    /* Queue full means the reader fell behind: drop the oldest buffer pointer
     * to make room and report the overflow */
    if (xQueueIsQueueFullFromISR(handle->msg_queue)) {
        xQueueReceiveFromISR(handle->msg_queue, &dummy, &need_yield1);
        if (handle->callbacks.on_recv_q_ovf) {
            evt.data = NULL;  // no valid data pointer for an overflow event
            user_need_yield |= handle->callbacks.on_recv_q_ovf(handle, &evt, handle->user_data);
        }
    }
    xQueueSendFromISR(handle->msg_queue, &(finish_desc->buf), &need_yield2);
    /* Non-zero asks the GDMA ISR wrapper to yield on exit */
    return need_yield1 | need_yield2 | user_need_yield;
}
/**
 * @brief GDMA tx-EOF ISR callback: recycle the just-sent buffer to the
 *        writer task via the message queue and fire user callbacks.
 *
 * @param dma_chan   GDMA channel (unused here)
 * @param event_data carries the address of the finished tx descriptor
 * @param user_data  the i2s channel handle
 * @return true if a higher-priority task was woken and a yield is needed
 */
static bool IRAM_ATTR i2s_dma_tx_callback(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
{
    i2s_chan_handle_t handle = (i2s_chan_handle_t)user_data;
    portBASE_TYPE need_yield1 = 0;
    portBASE_TYPE need_yield2 = 0;
    portBASE_TYPE user_need_yield = 0;
    lldesc_t *finish_desc;
    uint32_t dummy;
    /* Descriptor whose buffer has just finished transmitting */
    finish_desc = (lldesc_t *)(event_data->tx_eof_desc_addr);
    i2s_event_data_t evt = {
        .data = &(finish_desc->buf),  // address of the descriptor's buffer pointer field
        .size = handle->dma.buf_size,
    };
    if (handle->callbacks.on_sent) {
        user_need_yield |= handle->callbacks.on_sent(handle, &evt, handle->user_data);
    }
    /* Queue full means the writer fell behind: drop the oldest buffer pointer
     * to make room and report the overflow */
    if (xQueueIsQueueFullFromISR(handle->msg_queue)) {
        xQueueReceiveFromISR(handle->msg_queue, &dummy, &need_yield1);
        if (handle->callbacks.on_send_q_ovf) {
            evt.data = NULL;  // no valid data pointer for an overflow event
            user_need_yield |= handle->callbacks.on_send_q_ovf(handle, &evt, handle->user_data);
        }
    }
    xQueueSendFromISR(handle->msg_queue, &(finish_desc->buf), &need_yield2);
    /* Zero the sent buffer so stale audio is not replayed if the writer stalls */
    if (handle->dma.auto_clear) {
        uint8_t *sent_buf = (uint8_t *)finish_desc->buf;
        memset(sent_buf, 0, handle->dma.buf_size);
    }
    /* Non-zero asks the GDMA ISR wrapper to yield on exit */
    return need_yield1 | need_yield2 | user_need_yield;
}
  492. #else
/**
 * @brief Legacy (non-GDMA) rx ISR: on RX_EOF, publish the finished buffer to
 *        the reader task and fire user callbacks.
 *
 * @param arg the i2s channel handle
 */
static void IRAM_ATTR i2s_dma_rx_callback(void *arg)
{
    portBASE_TYPE need_yield1 = 0;
    portBASE_TYPE need_yield2 = 0;
    portBASE_TYPE user_need_yield = 0;
    lldesc_t *finish_desc = NULL;
    i2s_event_data_t evt;
    i2s_chan_handle_t handle = (i2s_chan_handle_t)arg;
    uint32_t dummy;
    /* Read-and-clear the interrupt status; bail out on a spurious interrupt
     * (this is a shared interrupt, so it may not be ours) */
    uint32_t status = i2s_hal_get_intr_status(&(handle->controller->hal));
    i2s_hal_clear_intr_status(&(handle->controller->hal), status);
    if (!status) {
        return;
    }
    if (handle && (status & I2S_LL_EVENT_RX_EOF)) {
        /* Fetch the descriptor that has just finished receiving */
        i2s_hal_get_in_eof_des_addr(&(handle->controller->hal), (uint32_t *)&finish_desc);
        evt.data = &(finish_desc->buf);  // address of the descriptor's buffer pointer field
        evt.size = handle->dma.buf_size;
        if (handle->callbacks.on_recv) {
            user_need_yield |= handle->callbacks.on_recv(handle, &evt, handle->user_data);
        }
        /* Queue full: drop the oldest buffer pointer and report the overflow */
        if (xQueueIsQueueFullFromISR(handle->msg_queue)) {
            xQueueReceiveFromISR(handle->msg_queue, &dummy, &need_yield1);
            if (handle->callbacks.on_recv_q_ovf) {
                evt.data = NULL;  // no valid data pointer for an overflow event
                user_need_yield |= handle->callbacks.on_recv_q_ovf(handle, &evt, handle->user_data);
            }
        }
        xQueueSendFromISR(handle->msg_queue, &(finish_desc->buf), &need_yield2);
    }
    if (need_yield1 || need_yield2 || user_need_yield) {
        portYIELD_FROM_ISR();
    }
}
/**
 * @brief Legacy (non-GDMA) tx ISR: on TX_EOF, recycle the sent buffer to the
 *        writer task and fire user callbacks.
 *
 * @param arg the i2s channel handle
 */
static void IRAM_ATTR i2s_dma_tx_callback(void *arg)
{
    portBASE_TYPE need_yield1 = 0;
    portBASE_TYPE need_yield2 = 0;
    portBASE_TYPE user_need_yield = 0;
    lldesc_t *finish_desc = NULL;
    i2s_event_data_t evt;
    i2s_chan_handle_t handle = (i2s_chan_handle_t)arg;
    uint32_t dummy;
    /* Read-and-clear the interrupt status; bail out on a spurious interrupt
     * (this is a shared interrupt, so it may not be ours) */
    uint32_t status = i2s_hal_get_intr_status(&(handle->controller->hal));
    i2s_hal_clear_intr_status(&(handle->controller->hal), status);
    if (!status) {
        return;
    }
    if (handle && (status & I2S_LL_EVENT_TX_EOF)) {
        /* Fetch the descriptor whose buffer has just finished transmitting */
        i2s_hal_get_out_eof_des_addr(&(handle->controller->hal), (uint32_t *)&finish_desc);
        evt.data = &(finish_desc->buf);  // address of the descriptor's buffer pointer field
        evt.size = handle->dma.buf_size;
        if (handle->callbacks.on_sent) {
            user_need_yield |= handle->callbacks.on_sent(handle, &evt, handle->user_data);
        }
        /* Queue full: drop the oldest buffer pointer and report the overflow */
        if (xQueueIsQueueFullFromISR(handle->msg_queue)) {
            xQueueReceiveFromISR(handle->msg_queue, &dummy, &need_yield1);
            if (handle->callbacks.on_send_q_ovf) {
                evt.data = NULL;  // no valid data pointer for an overflow event
                user_need_yield |= handle->callbacks.on_send_q_ovf(handle, &evt, handle->user_data);
            }
        }
        xQueueSendFromISR(handle->msg_queue, &(finish_desc->buf), &need_yield2);
        // Auto clear the dma buffer after data sent
        if (handle->dma.auto_clear) {
            uint8_t *buff = (uint8_t *)finish_desc->buf;
            memset(buff, 0, handle->dma.buf_size);
        }
    }
    if (need_yield1 || need_yield2 || user_need_yield) {
        portYIELD_FROM_ISR();
    }
}
  566. #endif
  567. /**
  568. * @brief I2S DMA interrupt initialization
  569. * @note I2S will use GDMA if chip supports, and the interrupt is triggered by GDMA.
  570. *
  571. * @param handle I2S channel handle
  572. * @param intr_flag Interrupt allocation flag
  573. * @return
  574. * - ESP_OK I2S DMA interrupt initialize success
  575. * - ESP_ERR_NOT_FOUND GDMA channel not found
  576. * - ESP_ERR_INVALID_ARG Invalid arguments
  577. * - ESP_ERR_INVALID_STATE GDMA state error
  578. */
  579. esp_err_t i2s_init_dma_intr(i2s_chan_handle_t handle, int intr_flag)
  580. {
  581. i2s_port_t port_id = handle->controller->id;
  582. ESP_RETURN_ON_FALSE((port_id >= 0) && (port_id < SOC_I2S_NUM), ESP_ERR_INVALID_ARG, TAG, "invalid handle");
  583. #if SOC_GDMA_SUPPORTED
  584. /* Set GDMA trigger module */
  585. gdma_trigger_t trig = {.periph = GDMA_TRIG_PERIPH_I2S};
  586. switch (port_id) {
  587. #if SOC_I2S_NUM > 1
  588. case I2S_NUM_1:
  589. trig.instance_id = SOC_GDMA_TRIG_PERIPH_I2S1;
  590. break;
  591. #endif
  592. default:
  593. trig.instance_id = SOC_GDMA_TRIG_PERIPH_I2S0;
  594. break;
  595. }
  596. /* Set GDMA config */
  597. gdma_channel_alloc_config_t dma_cfg = {};
  598. if (handle->dir == I2S_DIR_TX) {
  599. dma_cfg.direction = GDMA_CHANNEL_DIRECTION_TX;
  600. /* Register a new GDMA tx channel */
  601. ESP_RETURN_ON_ERROR(gdma_new_channel(&dma_cfg, &handle->dma.dma_chan), TAG, "Register tx dma channel error");
  602. ESP_RETURN_ON_ERROR(gdma_connect(handle->dma.dma_chan, trig), TAG, "Connect tx dma channel error");
  603. gdma_tx_event_callbacks_t cb = {.on_trans_eof = i2s_dma_tx_callback};
  604. /* Set callback function for GDMA, the interrupt is triggered by GDMA, then the GDMA ISR will call the callback function */
  605. gdma_register_tx_event_callbacks(handle->dma.dma_chan, &cb, handle);
  606. } else {
  607. dma_cfg.direction = GDMA_CHANNEL_DIRECTION_RX;
  608. /* Register a new GDMA rx channel */
  609. ESP_RETURN_ON_ERROR(gdma_new_channel(&dma_cfg, &handle->dma.dma_chan), TAG, "Register rx dma channel error");
  610. ESP_RETURN_ON_ERROR(gdma_connect(handle->dma.dma_chan, trig), TAG, "Connect rx dma channel error");
  611. gdma_rx_event_callbacks_t cb = {.on_recv_eof = i2s_dma_rx_callback};
  612. /* Set callback function for GDMA, the interrupt is triggered by GDMA, then the GDMA ISR will call the callback function */
  613. gdma_register_rx_event_callbacks(handle->dma.dma_chan, &cb, handle);
  614. }
  615. #else
  616. intr_flag |= ESP_INTR_FLAG_SHARED;
  617. /* Initialize I2S module interrupt */
  618. if (handle->dir == I2S_DIR_TX) {
  619. esp_intr_alloc_intrstatus(i2s_periph_signal[port_id].irq, intr_flag,
  620. (uint32_t)i2s_ll_get_interrupt_status_reg(handle->controller->hal.dev), I2S_LL_TX_EVENT_MASK,
  621. i2s_dma_tx_callback, handle, &handle->dma.dma_chan);
  622. } else {
  623. esp_intr_alloc_intrstatus(i2s_periph_signal[port_id].irq, intr_flag,
  624. (uint32_t)i2s_ll_get_interrupt_status_reg(handle->controller->hal.dev), I2S_LL_RX_EVENT_MASK,
  625. i2s_dma_rx_callback, handle, &handle->dma.dma_chan);
  626. }
  627. /* Start DMA */
  628. i2s_ll_enable_dma(handle->controller->hal.dev, true);
  629. #endif // SOC_GDMA_SUPPORTED
  630. return ESP_OK;
  631. }
  632. void i2s_gpio_check_and_set(gpio_num_t gpio, uint32_t signal_idx, bool is_input, bool is_invert)
  633. {
  634. /* Ignore the pin if pin = I2S_GPIO_UNUSED */
  635. if (gpio != I2S_GPIO_UNUSED) {
  636. gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[gpio], PIN_FUNC_GPIO);
  637. if (is_input) {
  638. /* Set direction, for some GPIOs, the input function are not enabled as default */
  639. gpio_set_direction(gpio, GPIO_MODE_INPUT);
  640. esp_rom_gpio_connect_in_signal(gpio, signal_idx, is_invert);
  641. } else {
  642. gpio_set_direction(gpio, GPIO_MODE_OUTPUT);
  643. esp_rom_gpio_connect_out_signal(gpio, signal_idx, is_invert, 0);
  644. }
  645. }
  646. }
  647. void i2s_gpio_loopback_set(gpio_num_t gpio, uint32_t out_sig_idx, uint32_t in_sig_idx)
  648. {
  649. if (gpio != I2S_GPIO_UNUSED) {
  650. gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[gpio], PIN_FUNC_GPIO);
  651. gpio_set_direction(gpio, GPIO_MODE_INPUT_OUTPUT);
  652. esp_rom_gpio_connect_out_signal(gpio, out_sig_idx, 0, 0);
  653. esp_rom_gpio_connect_in_signal(gpio, in_sig_idx, 0);
  654. }
  655. }
  656. esp_err_t i2s_check_set_mclk(i2s_port_t id, gpio_num_t gpio_num, bool is_apll, bool is_invert)
  657. {
  658. if (gpio_num == I2S_GPIO_UNUSED) {
  659. return ESP_OK;
  660. }
  661. #if CONFIG_IDF_TARGET_ESP32
  662. ESP_RETURN_ON_FALSE((gpio_num == GPIO_NUM_0 || gpio_num == GPIO_NUM_1 || gpio_num == GPIO_NUM_3),
  663. ESP_ERR_INVALID_ARG, TAG,
  664. "ESP32 only support to set GPIO0/GPIO1/GPIO3 as mclk signal, error GPIO number:%d", gpio_num);
  665. bool is_i2s0 = id == I2S_NUM_0;
  666. if (gpio_num == GPIO_NUM_0) {
  667. gpio_hal_iomux_func_sel(PERIPHS_IO_MUX_GPIO0_U, FUNC_GPIO0_CLK_OUT1);
  668. gpio_ll_iomux_pin_ctrl(is_apll ? 0xFFF6 : (is_i2s0 ? 0xFFF0 : 0xFFFF));
  669. } else if (gpio_num == GPIO_NUM_1) {
  670. gpio_hal_iomux_func_sel(PERIPHS_IO_MUX_U0TXD_U, FUNC_U0TXD_CLK_OUT3);
  671. gpio_ll_iomux_pin_ctrl(is_apll ? 0xF6F6 : (is_i2s0 ? 0xF0F0 : 0xF0FF));
  672. } else {
  673. gpio_hal_iomux_func_sel(PERIPHS_IO_MUX_U0RXD_U, FUNC_U0RXD_CLK_OUT2);
  674. gpio_ll_iomux_pin_ctrl(is_apll ? 0xFF66 : (is_i2s0 ? 0xFF00 : 0xFF0F));
  675. }
  676. #else
  677. ESP_RETURN_ON_FALSE(GPIO_IS_VALID_GPIO(gpio_num), ESP_ERR_INVALID_ARG, TAG, "mck_io_num invalid");
  678. i2s_gpio_check_and_set(gpio_num, i2s_periph_signal[id].mck_out_sig, false, is_invert);
  679. #endif
  680. ESP_LOGD(TAG, "MCLK is pinned to GPIO%d on I2S%d", id, gpio_num);
  681. return ESP_OK;
  682. }
  683. /*---------------------------------------------------------------------------
  684. I2S bus Public APIs
  685. ----------------------------------------------------------------------------
  686. Scope: Public
  687. ----------------------------------------------------------------------------*/
/**
 * @brief Allocate new I2S TX and/or RX channel(s)
 *
 * At least one of tx_handle / rx_handle must be non-NULL. When chan_cfg->id is
 * I2S_NUM_AUTO the first port with matching free channel slots is used,
 * otherwise the requested port must have the slots available.
 *
 * @param chan_cfg   Channel configuration (port id, role, DMA descriptor/frame counts, ...)
 * @param tx_handle  [out] TX channel handle, or NULL if no TX channel is wanted
 * @param rx_handle  [out] RX channel handle, or NULL if no RX channel is wanted
 * @return ESP_OK on success; ESP_ERR_INVALID_ARG on bad config;
 *         ESP_ERR_NOT_FOUND when no controller/channel is available;
 *         allocation errors forwarded from channel registration
 */
esp_err_t i2s_new_channel(const i2s_chan_config_t *chan_cfg, i2s_chan_handle_t *tx_handle, i2s_chan_handle_t *rx_handle)
{
#if CONFIG_I2S_ENABLE_DEBUG_LOG
    esp_log_level_set(TAG, ESP_LOG_DEBUG);
#endif
    /* Parameter validity check */
    I2S_NULL_POINTER_CHECK(TAG, chan_cfg);
    I2S_NULL_POINTER_CHECK(TAG, tx_handle || rx_handle);
    ESP_RETURN_ON_FALSE(chan_cfg->id < SOC_I2S_NUM || chan_cfg->id == I2S_NUM_AUTO, ESP_ERR_INVALID_ARG, TAG, "invalid I2S port id");
    /* The DMA descriptors form a ring; fewer than two would give the hardware no buffer to advance to */
    ESP_RETURN_ON_FALSE(chan_cfg->dma_desc_num >= 2, ESP_ERR_INVALID_ARG, TAG, "there should be at least 2 DMA buffers");
    esp_err_t ret = ESP_OK;
    i2s_controller_t *i2s_obj = NULL;
    i2s_port_t id = chan_cfg->id;
    bool channel_found = false;
    /* Bitmask of the directions the caller requests, used to probe controller availability */
    uint8_t chan_search_mask = 0;
    chan_search_mask |= tx_handle ? I2S_DIR_TX : 0;
    chan_search_mask |= rx_handle ? I2S_DIR_RX : 0;
    /* Channel will be registered to one i2s port automatically if id is I2S_NUM_AUTO
     * Otherwise, the channel will be registered to the specific port. */
    if (id == I2S_NUM_AUTO) {
        for (int i = 0; i < SOC_I2S_NUM && !channel_found; i++) {
            i2s_obj = i2s_acquire_controller_obj(i);
            if (!i2s_obj) {
                continue;
            }
            channel_found = i2s_take_available_channel(i2s_obj, chan_search_mask);
        }
        ESP_RETURN_ON_FALSE(i2s_obj, ESP_ERR_NOT_FOUND, TAG, "get i2s object failed");
    } else {
        i2s_obj = i2s_acquire_controller_obj(id);
        ESP_RETURN_ON_FALSE(i2s_obj, ESP_ERR_NOT_FOUND, TAG, "get i2s object failed");
        channel_found = i2s_take_available_channel(i2s_obj, chan_search_mask);
    }
    ESP_GOTO_ON_FALSE(channel_found, ESP_ERR_NOT_FOUND, err, TAG, "no available channel found");
    /* Register and specify the tx handle */
    if (tx_handle) {
        ESP_GOTO_ON_ERROR(i2s_register_channel(i2s_obj, I2S_DIR_TX, chan_cfg->dma_desc_num),
                          err, TAG, "register I2S tx channel failed");
        i2s_obj->tx_chan->role = chan_cfg->role;
        /* auto_clear only applies to TX: it zeroes finished DMA buffers to avoid replaying stale audio */
        i2s_obj->tx_chan->dma.auto_clear = chan_cfg->auto_clear;
        i2s_obj->tx_chan->dma.desc_num = chan_cfg->dma_desc_num;
        i2s_obj->tx_chan->dma.frame_num = chan_cfg->dma_frame_num;
        i2s_obj->tx_chan->start = i2s_tx_channel_start;
        i2s_obj->tx_chan->stop = i2s_tx_channel_stop;
        *tx_handle = i2s_obj->tx_chan;
        ESP_LOGD(TAG, "tx channel is registered on I2S%d successfully", i2s_obj->id);
    }
    /* Register and specify the rx handle */
    if (rx_handle) {
        ESP_GOTO_ON_ERROR(i2s_register_channel(i2s_obj, I2S_DIR_RX, chan_cfg->dma_desc_num),
                          err, TAG, "register I2S rx channel failed");
        i2s_obj->rx_chan->role = chan_cfg->role;
        i2s_obj->rx_chan->dma.desc_num = chan_cfg->dma_desc_num;
        i2s_obj->rx_chan->dma.frame_num = chan_cfg->dma_frame_num;
        i2s_obj->rx_chan->start = i2s_rx_channel_start;
        i2s_obj->rx_chan->stop = i2s_rx_channel_stop;
        *rx_handle = i2s_obj->rx_chan;
        ESP_LOGD(TAG, "rx channel is registered on I2S%d successfully", i2s_obj->id);
    }
    if ((tx_handle != NULL) && (rx_handle != NULL)) {
        i2s_obj->full_duplex = true;
    }
    return ESP_OK;
    /* i2s_obj allocated but register channel failed */
err:
    /* if the controller object has no channel, find the corresponding global object and destroy it */
    if (i2s_obj != NULL && i2s_obj->rx_chan == NULL && i2s_obj->tx_chan == NULL) {
        for (int i = 0; i < SOC_I2S_NUM; i++) {
            if (i2s_obj == g_i2s.controller[i]) {
                i2s_destroy_controller_obj(&g_i2s.controller[i]);
                break;
            }
        }
    }
    return ret;
}
/**
 * @brief Delete an I2S channel and release all its resources
 *
 * The channel must be disabled (state below RUNNING) before deletion. When the
 * last channel of a controller is removed, the controller object itself is
 * destroyed as well.
 *
 * @param handle  Channel handle to delete
 * @return ESP_OK on success, ESP_ERR_INVALID_STATE if the channel is still running
 */
esp_err_t i2s_del_channel(i2s_chan_handle_t handle)
{
    I2S_NULL_POINTER_CHECK(TAG, handle);
    ESP_RETURN_ON_FALSE(handle->state < I2S_CHAN_STATE_RUNNING, ESP_ERR_INVALID_STATE, TAG, "the channel can't be deleted unless it is disabled");
    i2s_controller_t *i2s_obj = handle->controller;
    /* Cache id/dir now: the handle memory is freed below but they are still needed for bookkeeping and logging */
    int __attribute__((unused)) id = i2s_obj->id;
    i2s_dir_t __attribute__((unused)) dir = handle->dir;
    bool is_bound = true;
#if SOC_I2S_HW_VERSION_2
    /* HW v2 gates TX/RX clocks independently; stop this direction's clock */
    if (dir == I2S_DIR_TX) {
        i2s_ll_tx_disable_clock(handle->controller->hal.dev);
    } else {
        i2s_ll_rx_disable_clock(handle->controller->hal.dev);
    }
#endif
#if SOC_I2S_SUPPORTS_APLL
    if (handle->apll_en) {
        /* Must switch back to D2CLK on ESP32-S2,
         * because the clock of some registers are bound to APLL,
         * otherwise, once APLL is disabled, the registers can't be updated anymore */
        if (handle->dir == I2S_DIR_TX) {
            i2s_ll_tx_clk_set_src(handle->controller->hal.dev, I2S_CLK_SRC_DEFAULT);
        } else {
            i2s_ll_rx_clk_set_src(handle->controller->hal.dev, I2S_CLK_SRC_DEFAULT);
        }
        /* Drop this channel's reference on the shared APLL */
        periph_rtc_apll_release();
    }
#endif
#if CONFIG_PM_ENABLE
    if (handle->pm_lock) {
        esp_pm_lock_delete(handle->pm_lock);
    }
#endif
    if (handle->mode_info) {
        free(handle->mode_info);
    }
    if (handle->dma.desc) {
        i2s_free_dma_desc(handle);
    }
#if CONFIG_I2S_ISR_IRAM_SAFE
    /* When ISR-IRAM-safe, queue/semaphore storage was allocated manually and must be freed manually */
    if (handle->msg_que_storage) {
        free(handle->msg_que_storage);
    }
    if (handle->msg_que_struct) {
        free(handle->msg_que_struct);
    }
    if (handle->mutex) {
        free(handle->mutex_struct);
    }
    if (handle->binary_struct) {
        free(handle->binary_struct);
    }
#endif
    if (handle->msg_queue) {
        vQueueDelete(handle->msg_queue);
    }
    if (handle->mutex) {
        vSemaphoreDelete(handle->mutex);
    }
    if (handle->binary) {
        vSemaphoreDelete(handle->binary);
    }
#if SOC_I2S_HW_VERSION_1
    /* HW v1 channels always come in TX+RX pairs, so occupancy is cleared wholesale */
    i2s_obj->chan_occupancy = 0;
#else
    i2s_obj->chan_occupancy &= ~(uint32_t)dir;
#endif
    if (handle->dma.dma_chan) {
#if SOC_GDMA_SUPPORTED
        gdma_del_channel(handle->dma.dma_chan);
#else
        /* Without GDMA, dma_chan holds the allocated interrupt handle instead */
        esp_intr_free(handle->dma.dma_chan);
#endif
    }
    if (handle == i2s_obj->tx_chan) {
        free(i2s_obj->tx_chan);
        i2s_obj->tx_chan = NULL;
        i2s_obj->full_duplex = false;
    } else if (handle == i2s_obj->rx_chan) {
        free(i2s_obj->rx_chan);
        i2s_obj->rx_chan = NULL;
        i2s_obj->full_duplex = false;
    } else {
        /* Indicate the delete channel is an unbound free channel */
        is_bound = false;
        free(handle);
    }
    /* If the delete channel was bound to a controller before,
       we need to destroy this controller object if there is no channel any more */
    if (is_bound) {
        if (!(i2s_obj->tx_chan) && !(i2s_obj->rx_chan)) {
            i2s_destroy_controller_obj(&g_i2s.controller[i2s_obj->id]);
        }
        ESP_LOGD(TAG, "%s channel on I2S%d deleted", dir == I2S_DIR_TX ? "tx" : "rx", id);
    }
    return ESP_OK;
}
  861. esp_err_t i2s_channel_get_info(i2s_chan_handle_t handle, i2s_chan_info_t *chan_info)
  862. {
  863. I2S_NULL_POINTER_CHECK(TAG, handle);
  864. I2S_NULL_POINTER_CHECK(TAG, chan_info);
  865. /* Find whether the handle is a registered i2s handle or still available */
  866. for (int i = 0; i < SOC_I2S_NUM; i++) {
  867. if (g_i2s.controller[i] != NULL) {
  868. if (g_i2s.controller[i]->tx_chan == handle ||
  869. g_i2s.controller[i]->rx_chan == handle) {
  870. goto found;
  871. }
  872. }
  873. }
  874. return ESP_ERR_NOT_FOUND;
  875. found:
  876. /* Assign the handle information */
  877. xSemaphoreTake(handle->mutex, portMAX_DELAY);
  878. chan_info->id = handle->controller->id;
  879. chan_info->dir = handle->dir;
  880. chan_info->role = handle->role;
  881. chan_info->mode = handle->mode;
  882. if (handle->controller->full_duplex) {
  883. if (handle->dir == I2S_DIR_TX) {
  884. chan_info->pair_chan = handle->controller->rx_chan;
  885. } else {
  886. chan_info->pair_chan = handle->controller->tx_chan;
  887. }
  888. } else {
  889. chan_info->pair_chan = NULL;
  890. }
  891. xSemaphoreGive(handle->mutex);
  892. return ESP_OK;
  893. }
/**
 * @brief Enable an I2S channel and start its transmission/reception
 *
 * The channel must be in READY state (initialized but not running).
 *
 * @param handle  Channel handle to enable
 * @return ESP_OK on success, ESP_ERR_INVALID_STATE if not in READY state
 */
esp_err_t i2s_channel_enable(i2s_chan_handle_t handle)
{
    I2S_NULL_POINTER_CHECK(TAG, handle);
    esp_err_t ret = ESP_OK;
    xSemaphoreTake(handle->mutex, portMAX_DELAY);
    ESP_GOTO_ON_FALSE(handle->state == I2S_CHAN_STATE_READY, ESP_ERR_INVALID_STATE, err, TAG, "the channel has already enabled or not initialized");
#if CONFIG_PM_ENABLE
    /* Hold the PM lock while running so power management can't scale/gate clocks mid-stream */
    esp_pm_lock_acquire(handle->pm_lock);
#endif
    /* Clear the read/write cursor so the first read/write fetches a fresh DMA buffer */
    handle->dma.curr_ptr = NULL;
    handle->dma.rw_pos = 0;
    handle->start(handle);
    handle->state = I2S_CHAN_STATE_RUNNING;
    /* Reset queue */
    xQueueReset(handle->msg_queue);
    xSemaphoreGive(handle->mutex);
    /* Give the binary semaphore to enable reading / writing task */
    xSemaphoreGive(handle->binary);
    ESP_LOGD(TAG, "i2s %s channel enabled", handle->dir == I2S_DIR_TX ? "tx" : "rx");
    return ret;
err:
    xSemaphoreGive(handle->mutex);
    return ret;
}
/**
 * @brief Disable an I2S channel and stop its transmission/reception
 *
 * Waits for any in-progress read/write call to quit before stopping the
 * hardware, so it may block until that call observes the state change.
 *
 * @param handle  Channel handle to disable
 * @return ESP_OK on success, ESP_ERR_INVALID_STATE if the channel is not enabled
 */
esp_err_t i2s_channel_disable(i2s_chan_handle_t handle)
{
    I2S_NULL_POINTER_CHECK(TAG, handle);
    esp_err_t ret = ESP_OK;
    xSemaphoreTake(handle->mutex, portMAX_DELAY);
    ESP_GOTO_ON_FALSE(handle->state > I2S_CHAN_STATE_READY, ESP_ERR_INVALID_STATE, err, TAG, "the channel has not been enabled yet");
    /* Update the state to force quit the current reading/writing operation */
    handle->state = I2S_CHAN_STATE_READY;
    /* Waiting for reading/writing operation quit: the running read/write gives
     * this binary semaphore back when it sees the state is no longer RUNNING */
    xSemaphoreTake(handle->binary, portMAX_DELAY);
    handle->stop(handle);
#if CONFIG_PM_ENABLE
    esp_pm_lock_release(handle->pm_lock);
#endif
    xSemaphoreGive(handle->mutex);
    ESP_LOGD(TAG, "i2s %s channel disabled", handle->dir == I2S_DIR_TX ? "tx" : "rx");
    return ret;
err:
    xSemaphoreGive(handle->mutex);
    return ret;
}
  939. esp_err_t i2s_channel_write(i2s_chan_handle_t handle, const void *src, size_t size, size_t *bytes_written, uint32_t timeout_ms)
  940. {
  941. I2S_NULL_POINTER_CHECK(TAG, handle);
  942. ESP_RETURN_ON_FALSE(handle->dir == I2S_DIR_TX, ESP_ERR_INVALID_ARG, TAG, "this channel is not tx channel");
  943. esp_err_t ret = ESP_OK;
  944. char *data_ptr;
  945. char *src_byte;
  946. size_t bytes_can_write;
  947. *bytes_written = 0;
  948. /* The binary semaphore can only be taken when the channel has been enabled and no other writing operation in progress */
  949. ESP_RETURN_ON_FALSE(xSemaphoreTake(handle->binary, pdMS_TO_TICKS(timeout_ms)) == pdTRUE, ESP_ERR_INVALID_STATE, TAG, "The channel is not enabled");
  950. src_byte = (char *)src;
  951. while (size > 0 && handle->state == I2S_CHAN_STATE_RUNNING) {
  952. if (handle->dma.rw_pos == handle->dma.buf_size || handle->dma.curr_ptr == NULL) {
  953. if (xQueueReceive(handle->msg_queue, &(handle->dma.curr_ptr), pdMS_TO_TICKS(timeout_ms)) == pdFALSE) {
  954. ret = ESP_ERR_TIMEOUT;
  955. break;
  956. }
  957. handle->dma.rw_pos = 0;
  958. }
  959. data_ptr = (char *)handle->dma.curr_ptr;
  960. data_ptr += handle->dma.rw_pos;
  961. bytes_can_write = handle->dma.buf_size - handle->dma.rw_pos;
  962. if (bytes_can_write > size) {
  963. bytes_can_write = size;
  964. }
  965. memcpy(data_ptr, src_byte, bytes_can_write);
  966. size -= bytes_can_write;
  967. src_byte += bytes_can_write;
  968. handle->dma.rw_pos += bytes_can_write;
  969. (*bytes_written) += bytes_can_write;
  970. }
  971. xSemaphoreGive(handle->binary);
  972. return ret;
  973. }
  974. esp_err_t i2s_channel_read(i2s_chan_handle_t handle, void *dest, size_t size, size_t *bytes_read, uint32_t timeout_ms)
  975. {
  976. I2S_NULL_POINTER_CHECK(TAG, handle);
  977. ESP_RETURN_ON_FALSE(handle->dir == I2S_DIR_RX, ESP_ERR_INVALID_ARG, TAG, "this channel is not rx channel");
  978. esp_err_t ret = ESP_OK;
  979. uint8_t *data_ptr;
  980. uint8_t *dest_byte;
  981. int bytes_can_read;
  982. *bytes_read = 0;
  983. dest_byte = (uint8_t *)dest;
  984. /* The binary semaphore can only be taken when the channel has been enabled and no other reading operation in progress */
  985. ESP_RETURN_ON_FALSE(xSemaphoreTake(handle->binary, pdMS_TO_TICKS(timeout_ms)) == pdTRUE, ESP_ERR_INVALID_STATE, TAG, "The channel is not enabled");
  986. while (size > 0 && handle->state == I2S_CHAN_STATE_RUNNING) {
  987. if (handle->dma.rw_pos == handle->dma.buf_size || handle->dma.curr_ptr == NULL) {
  988. if (xQueueReceive(handle->msg_queue, &(handle->dma.curr_ptr), pdMS_TO_TICKS(timeout_ms)) == pdFALSE) {
  989. ret = ESP_ERR_TIMEOUT;
  990. break;
  991. }
  992. handle->dma.rw_pos = 0;
  993. }
  994. data_ptr = (uint8_t *)handle->dma.curr_ptr;
  995. data_ptr += handle->dma.rw_pos;
  996. bytes_can_read = handle->dma.buf_size - handle->dma.rw_pos;
  997. if (bytes_can_read > (int)size) {
  998. bytes_can_read = size;
  999. }
  1000. memcpy(dest_byte, data_ptr, bytes_can_read);
  1001. size -= bytes_can_read;
  1002. dest_byte += bytes_can_read;
  1003. handle->dma.rw_pos += bytes_can_read;
  1004. (*bytes_read) += bytes_can_read;
  1005. }
  1006. xSemaphoreGive(handle->binary);
  1007. return ret;
  1008. }
  1009. /*---------------------------------------------------------------------------
  1010. I2S Platform APIs
  1011. ----------------------------------------------------------------------------
  1012. Scope: This file and ADC/DAC/LCD driver
  1013. ----------------------------------------------------------------------------*/
  1014. esp_err_t i2s_platform_acquire_occupation(int id, const char *comp_name)
  1015. {
  1016. esp_err_t ret = ESP_OK;
  1017. const char *occupied_comp = NULL;
  1018. ESP_RETURN_ON_FALSE(id < SOC_I2S_NUM, ESP_ERR_INVALID_ARG, TAG, "invalid i2s port id");
  1019. portENTER_CRITICAL(&g_i2s.spinlock);
  1020. if ((!g_i2s.controller[id]) && (g_i2s.comp_name[id] == NULL)) {
  1021. g_i2s.comp_name[id] = comp_name;
  1022. /* Enable module clock */
  1023. periph_module_enable(i2s_periph_signal[id].module);
  1024. i2s_ll_enable_clock(I2S_LL_GET_HW(id));
  1025. } else {
  1026. occupied_comp = g_i2s.comp_name[id];
  1027. ret = ESP_ERR_NOT_FOUND;
  1028. }
  1029. portEXIT_CRITICAL(&g_i2s.spinlock);
  1030. if (occupied_comp != NULL) {
  1031. ESP_LOGW(TAG, "i2s controller %d has been occupied by %s", id, occupied_comp);
  1032. }
  1033. return ret;
  1034. }
  1035. esp_err_t i2s_platform_release_occupation(int id)
  1036. {
  1037. esp_err_t ret = ESP_OK;
  1038. ESP_RETURN_ON_FALSE(id < SOC_I2S_NUM, ESP_ERR_INVALID_ARG, TAG, "invalid i2s port id");
  1039. portENTER_CRITICAL(&g_i2s.spinlock);
  1040. if (!g_i2s.controller[id]) {
  1041. g_i2s.comp_name[id] = NULL;
  1042. /* Disable module clock */
  1043. periph_module_disable(i2s_periph_signal[id].module);
  1044. i2s_ll_disable_clock(I2S_LL_GET_HW(id));
  1045. } else {
  1046. ret = ESP_ERR_INVALID_STATE;
  1047. }
  1048. portEXIT_CRITICAL(&g_i2s.spinlock);
  1049. return ret;
  1050. }
// Only used in `test_i2s_iram.c` to write DMA buffer directly
/**
 * @brief Return the byte offset of the `dma.bufs` field within the channel object
 *
 * NOTE(review): this is a hand-rolled offsetof via a null pointer; the standard
 * `offsetof` macro on the underlying struct type would be the well-defined
 * equivalent — TODO confirm the struct tag is visible here and switch if so.
 */
size_t inline i2s_platform_get_dma_buffer_offset(void)
{
    /* Force to transfer address '0' into 'i2s_chan_handle_t' type,
     * then find the corresponding field , the address of this field is the offset of this type */
    return (size_t)&(((i2s_chan_handle_t)0)->dma.bufs);
}