dac_continuous.c

/*
 * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <stdatomic.h>
#include <string.h>
#include <sys/queue.h>
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "freertos/semphr.h"
#include "freertos/idf_additions.h"
#include "sdkconfig.h"
#include "rom/lldesc.h"
#include "soc/soc_caps.h"
#include "driver/dac_continuous.h"
#include "dac_priv_common.h"
#include "dac_priv_dma.h"
#if CONFIG_DAC_ENABLE_DEBUG_LOG
// The local log level must be defined before including esp_log.h
// Set the maximum log level for this source file
#define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
#endif
#include "esp_check.h"
#if CONFIG_PM_ENABLE
#include "esp_pm.h"
#endif

#define DAC_DMA_MAX_BUF_SIZE    4092    // The max DMA buffer size is 4095 bytes; 4092 keeps it 4-byte aligned

#if CONFIG_DAC_ISR_IRAM_SAFE || CONFIG_DAC_CTRL_FUNC_IN_IRAM
#define DAC_MEM_ALLOC_CAPS      (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT)
#else
#define DAC_MEM_ALLOC_CAPS      MALLOC_CAP_DEFAULT
#endif

#if CONFIG_DAC_ISR_IRAM_SAFE
#define DAC_INTR_ALLOC_FLAGS    (ESP_INTR_FLAG_LOWMED | ESP_INTR_FLAG_IRAM | ESP_INTR_FLAG_INTRDISABLED | ESP_INTR_FLAG_SHARED)
#else
#define DAC_INTR_ALLOC_FLAGS    (ESP_INTR_FLAG_LOWMED | ESP_INTR_FLAG_INTRDISABLED | ESP_INTR_FLAG_SHARED)
#endif

#define DAC_DMA_ALLOC_CAPS      (MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA)
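
/* A tolerant variant of STAILQ_REMOVE: it walks the chain and only unlinks the element
 * if it is actually found, instead of walking past the tail. This matters in the EOF
 * ISR, where the finished descriptor may already have been unlinked from the chain. */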
#define DAC_STAILQ_REMOVE(head, elm, type, field) do {              \
    if ((head)->stqh_first == (elm)) {                              \
        STAILQ_REMOVE_HEAD((head), field);                          \
    } else {                                                        \
        struct type *curelm = (head)->stqh_first;                   \
        while (curelm->field.stqe_next != (elm) &&                  \
               curelm->field.stqe_next != NULL)                     \
            curelm = curelm->field.stqe_next;                       \
        if (curelm->field.stqe_next && (curelm->field.stqe_next =   \
            curelm->field.stqe_next->field.stqe_next) == NULL)      \
            (head)->stqh_last = &(curelm)->field.stqe_next;         \
    }                                                               \
} while (/*CONSTCOND*/0)
struct dac_continuous_s {
    uint32_t                chan_cnt;
    dac_continuous_config_t cfg;
    atomic_bool             is_enabled;
    atomic_bool             is_cyclic;
    atomic_bool             is_running;
    atomic_bool             is_async;
    intr_handle_t           intr_handle;    /* Interrupt handle */
#if CONFIG_PM_ENABLE
    esp_pm_lock_handle_t    pm_lock;
#endif
    SemaphoreHandle_t       mutex;
    QueueHandle_t           desc_pool;      /* The pool of available descriptors;
                                             * descriptors in the pool are not linked into the pending chain */
    lldesc_t                **desc;
    uint8_t                 **bufs;
    STAILQ_HEAD(desc_chain_s, lldesc_s) head;   /* Head of the descriptor chain;
                                                 * descriptors in the chain are being sent or are pending to be sent */
    dac_event_callbacks_t   cbs;            /* Interrupt callbacks */
    void                    *user_data;
};

static const char *TAG = "dac_continuous";

static bool s_dma_in_use = false;

static portMUX_TYPE desc_spinlock = portMUX_INITIALIZER_UNLOCKED;

#define DESC_ENTER_CRITICAL()       portENTER_CRITICAL(&desc_spinlock)
#define DESC_EXIT_CRITICAL()        portEXIT_CRITICAL(&desc_spinlock)
#define DESC_ENTER_CRITICAL_ISR()   portENTER_CRITICAL_ISR(&desc_spinlock)
#define DESC_EXIT_CRITICAL_ISR()    portEXIT_CRITICAL_ISR(&desc_spinlock)
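
/*
 * Typical synchronous usage (a minimal sketch; the field values below are illustrative
 * examples, not driver defaults):
 *
 *   dac_continuous_config_t cfg = {
 *       .chan_mask = DAC_CHANNEL_MASK_ALL,
 *       .desc_num = 4,
 *       .buf_size = 2048,
 *       .freq_hz = 48000,
 *       .offset = 0,
 *       .clk_src = DAC_DIGI_CLK_SRC_DEFAULT,
 *       .chan_mode = DAC_CHANNEL_MODE_SIMUL,
 *   };
 *   dac_continuous_handle_t handle = NULL;
 *   ESP_ERROR_CHECK(dac_continuous_new_channels(&cfg, &handle));
 *   ESP_ERROR_CHECK(dac_continuous_enable(handle));
 *   ESP_ERROR_CHECK(dac_continuous_write(handle, data, data_len, NULL, -1));
 *   ESP_ERROR_CHECK(dac_continuous_disable(handle));
 *   ESP_ERROR_CHECK(dac_continuous_del_channels(handle));
 */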
static void s_dac_free_dma_desc(dac_continuous_handle_t handle)
{
    STAILQ_INIT(&handle->head);
    if (handle->desc != NULL) {
        /* All descriptors share one contiguous allocation, so freeing desc[0] releases them all */
        if (handle->desc[0]) {
            free(handle->desc[0]);
        }
        free(handle->desc);
        handle->desc = NULL;
    }
    if (handle->bufs != NULL) {
        for (int i = 0; i < handle->cfg.desc_num; i++) {
            if (handle->bufs[i]) {
                free(handle->bufs[i]);
                handle->bufs[i] = NULL;
            }
        }
        free(handle->bufs);
        handle->bufs = NULL;
    }
}
static esp_err_t s_dac_alloc_dma_desc(dac_continuous_handle_t handle)
{
    esp_err_t ret = ESP_OK;
    STAILQ_INIT(&handle->head);
    handle->desc = (lldesc_t **)heap_caps_calloc(handle->cfg.desc_num, sizeof(lldesc_t *), DAC_DMA_ALLOC_CAPS);
    ESP_RETURN_ON_FALSE(handle->desc, ESP_ERR_NO_MEM, TAG, "failed to allocate dma descriptor array");
    handle->bufs = (uint8_t **)heap_caps_calloc(handle->cfg.desc_num, sizeof(uint8_t *), DAC_DMA_ALLOC_CAPS);
    ESP_RETURN_ON_FALSE(handle->bufs, ESP_ERR_NO_MEM, TAG, "failed to allocate dma buffer array");
    /* Allocate all the descriptors as one contiguous, DMA-capable block */
    lldesc_t *descs = (lldesc_t *)heap_caps_calloc(handle->cfg.desc_num, sizeof(lldesc_t), DAC_DMA_ALLOC_CAPS);
    ESP_RETURN_ON_FALSE(descs, ESP_ERR_NO_MEM, TAG, "failed to allocate dma descriptors");
    for (int cnt = 0; cnt < handle->cfg.desc_num; cnt++) {
        /* Record the address of each DMA descriptor */
        handle->desc[cnt] = &descs[cnt];
        ESP_LOGD(TAG, "desc[%d] %p", cnt, handle->desc[cnt]);
        /* Allocate the DMA buffer */
        handle->bufs[cnt] = (uint8_t *)heap_caps_calloc(1, handle->cfg.buf_size, DAC_DMA_ALLOC_CAPS);
        ESP_GOTO_ON_FALSE(handle->bufs[cnt], ESP_ERR_NO_MEM, err, TAG, "failed to allocate dma buffer");
        /* Assign the initial values */
        lldesc_config(handle->desc[cnt], LLDESC_SW_OWNED, 1, 0, handle->cfg.buf_size);
        handle->desc[cnt]->size = handle->cfg.buf_size;
        handle->desc[cnt]->buf = handle->bufs[cnt];
        handle->desc[cnt]->offset = 0;
    }
    return ESP_OK;
err:
    /* Free everything allocated so far if any allocation failed */
    s_dac_free_dma_desc(handle);
    return ret;
}
static void IRAM_ATTR s_dac_default_intr_handler(void *arg)
{
    dac_continuous_handle_t handle = (dac_continuous_handle_t)arg;
    uint32_t dummy;
    BaseType_t need_awoke = pdFALSE;
    BaseType_t tmp = pdFALSE;
    uint32_t intr_mask = dac_dma_periph_intr_is_triggered();
    if (intr_mask & DAC_DMA_EOF_INTR) {
        lldesc_t *fdesc = (lldesc_t *)dac_dma_periph_intr_get_eof_desc();
        if (!atomic_load(&handle->is_cyclic)) {
            /* Remove the descriptor that has finished sending from the pending chain */
            DESC_ENTER_CRITICAL_ISR();
            if (STAILQ_FIRST(&handle->head) != NULL) {
                DAC_STAILQ_REMOVE(&handle->head, fdesc, lldesc_s, qe);
            }
            DESC_EXIT_CRITICAL_ISR();
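            /* Recycle the finished descriptor into the pool. If the pool is somehow full,
             * drop the oldest entry first so that the send below can never fail in the ISR. */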
            if (xQueueIsQueueFullFromISR(handle->desc_pool) == pdTRUE) {
                xQueueReceiveFromISR(handle->desc_pool, &dummy, &tmp);
                need_awoke |= tmp;
            }
            xQueueSendFromISR(handle->desc_pool, &fdesc, &tmp);
            need_awoke |= tmp;
        }
        if (handle->cbs.on_convert_done) {
            dac_event_data_t evt_data = {
                .buf = (void *)fdesc->buf,
                .buf_size = handle->cfg.buf_size,
                .write_bytes = fdesc->length,
            };
            need_awoke |= handle->cbs.on_convert_done(handle, &evt_data, handle->user_data);
        }
    }
    if (intr_mask & DAC_DMA_TEOF_INTR) {
        /* Total end of frame interrupt received, DMA stopped */
        atomic_store(&handle->is_running, false);
        if (handle->cbs.on_stop) {
            need_awoke |= handle->cbs.on_stop(handle, NULL, handle->user_data);
        }
    }
    if (need_awoke == pdTRUE) {
        portYIELD_FROM_ISR();
    }
}
esp_err_t dac_continuous_new_channels(const dac_continuous_config_t *cont_cfg, dac_continuous_handle_t *ret_handle)
{
#if CONFIG_DAC_ENABLE_DEBUG_LOG
    esp_log_level_set(TAG, ESP_LOG_DEBUG);
#endif
    /* Parameters validation */
    DAC_NULL_POINTER_CHECK(cont_cfg);
    DAC_NULL_POINTER_CHECK(ret_handle);
    ESP_RETURN_ON_FALSE(cont_cfg->chan_mask <= DAC_CHANNEL_MASK_ALL, ESP_ERR_INVALID_ARG, TAG, "invalid dac channel mask");
    ESP_RETURN_ON_FALSE(cont_cfg->desc_num > 1, ESP_ERR_INVALID_STATE, TAG, "at least two DMA descriptors are needed");
    ESP_RETURN_ON_FALSE(!s_dma_in_use, ESP_ERR_INVALID_STATE, TAG, "DMA already in use");
    esp_err_t ret = ESP_OK;
    /* Register the channels */
    for (uint32_t i = 0, mask = cont_cfg->chan_mask; mask; mask >>= 1, i++) {
        if (mask & 0x01) {
            ESP_GOTO_ON_ERROR(dac_priv_register_channel(i, "dac continuous"),
                              err4, TAG, "register dac channel %"PRIu32" failed", i);
        }
    }
    /* Allocate the continuous mode struct */
    dac_continuous_handle_t handle = heap_caps_calloc(1, sizeof(struct dac_continuous_s), DAC_MEM_ALLOC_CAPS);
    /* Jump to err4 (instead of returning) so the channels registered above are released on failure */
    ESP_GOTO_ON_FALSE(handle, ESP_ERR_NO_MEM, err4, TAG, "no memory for the dac continuous mode structure");
    /* Allocate the queue and the mutex */
    handle->desc_pool = xQueueCreateWithCaps(cont_cfg->desc_num, sizeof(lldesc_t *), DAC_MEM_ALLOC_CAPS);
    handle->mutex = xSemaphoreCreateMutexWithCaps(DAC_MEM_ALLOC_CAPS);
    ESP_GOTO_ON_FALSE(handle->desc_pool, ESP_ERR_NO_MEM, err3, TAG, "no memory for message queue");
    ESP_GOTO_ON_FALSE(handle->mutex, ESP_ERR_NO_MEM, err3, TAG, "no memory for channels mutex");
    /* Create PM lock */
#if CONFIG_PM_ENABLE
    esp_pm_lock_type_t pm_lock_type = cont_cfg->clk_src == DAC_DIGI_CLK_SRC_APLL ? ESP_PM_NO_LIGHT_SLEEP : ESP_PM_APB_FREQ_MAX;
    ESP_GOTO_ON_ERROR(esp_pm_lock_create(pm_lock_type, 0, "dac_driver", &handle->pm_lock), err3, TAG, "Failed to create DAC pm lock");
#endif
    handle->chan_cnt = __builtin_popcount(cont_cfg->chan_mask);
    memcpy(&(handle->cfg), cont_cfg, sizeof(dac_continuous_config_t));
    atomic_init(&handle->is_enabled, false);
    atomic_init(&handle->is_cyclic, false);
    atomic_init(&handle->is_running, false);
    atomic_init(&handle->is_async, false);
    /* Allocate the DMA buffers */
    ESP_GOTO_ON_ERROR(s_dac_alloc_dma_desc(handle), err2, TAG, "Failed to allocate memory for DMA buffers");
    /* Initialize the DAC DMA peripheral */
    ESP_GOTO_ON_ERROR(dac_dma_periph_init(cont_cfg->freq_hz,
                                          cont_cfg->chan_mode == DAC_CHANNEL_MODE_ALTER,
                                          cont_cfg->clk_src == DAC_DIGI_CLK_SRC_APLL),
                      err2, TAG, "Failed to initialize DAC DMA peripheral");
    /* Register the DMA interrupt */
    ESP_GOTO_ON_ERROR(esp_intr_alloc(dac_dma_periph_get_intr_signal(), DAC_INTR_ALLOC_FLAGS,
                                     s_dac_default_intr_handler, handle, &(handle->intr_handle)),
                      err1, TAG, "Failed to register DAC DMA interrupt");
    /* Connect the DAC module to the DMA peripheral */
    DAC_RTC_ENTER_CRITICAL();
    dac_ll_digi_enable_dma(true);
    DAC_RTC_EXIT_CRITICAL();
    s_dma_in_use = true;
    *ret_handle = handle;
    return ret;
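    /* Error handling: the labels below unwind in reverse order of acquisition; each
     * failure site jumps to the label that releases everything acquired before it */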
err1:
    dac_dma_periph_deinit();
err2:
    s_dac_free_dma_desc(handle);
err3:
    if (handle->desc_pool) {
        vQueueDeleteWithCaps(handle->desc_pool);
    }
    if (handle->mutex) {
        vSemaphoreDeleteWithCaps(handle->mutex);
    }
    free(handle);
err4:
    /* Deregister the channels */
    for (uint32_t i = 0, mask = cont_cfg->chan_mask; mask; mask >>= 1, i++) {
        if (mask & 0x01) {
            dac_priv_deregister_channel(i);
        }
    }
    return ret;
}
esp_err_t dac_continuous_del_channels(dac_continuous_handle_t handle)
{
    DAC_NULL_POINTER_CHECK(handle);
    ESP_RETURN_ON_FALSE(!atomic_load(&handle->is_enabled), ESP_ERR_INVALID_STATE, TAG, "dac continuous output not disabled yet");
    /* Deregister the DMA interrupt */
    if (handle->intr_handle) {
        ESP_RETURN_ON_ERROR(esp_intr_free(handle->intr_handle), TAG, "Failed to deregister DMA interrupt");
        handle->intr_handle = NULL;
    }
    /* Deinitialize the DMA peripheral */
    ESP_RETURN_ON_ERROR(dac_dma_periph_deinit(), TAG, "Failed to deinitialize DAC DMA peripheral");
    /* Disconnect the DAC module from the DMA peripheral */
    DAC_RTC_ENTER_CRITICAL();
    dac_ll_digi_enable_dma(false);
    DAC_RTC_EXIT_CRITICAL();
    /* Free the allocated resources */
    s_dac_free_dma_desc(handle);
    if (handle->desc_pool) {
        vQueueDeleteWithCaps(handle->desc_pool);
        handle->desc_pool = NULL;
    }
    if (handle->mutex) {
        vSemaphoreDeleteWithCaps(handle->mutex);
        handle->mutex = NULL;
    }
#if CONFIG_PM_ENABLE
    if (handle->pm_lock) {
        esp_pm_lock_delete(handle->pm_lock);
        handle->pm_lock = NULL;
    }
#endif
    /* Deregister the channels */
    for (uint32_t i = 0, mask = handle->cfg.chan_mask; mask; mask >>= 1, i++) {
        if (mask & 0x01) {
            dac_priv_deregister_channel(i);
        }
    }
    free(handle);
    s_dma_in_use = false;
    return ESP_OK;
}
esp_err_t dac_continuous_register_event_callback(dac_continuous_handle_t handle, const dac_event_callbacks_t *callbacks, void *user_data)
{
    DAC_NULL_POINTER_CHECK(handle);
    if (!callbacks) {
        /* A NULL 'callbacks' pointer clears the registered callbacks */
        memset(&handle->cbs, 0, sizeof(dac_event_callbacks_t));
        return ESP_OK;
    }
#if CONFIG_DAC_ISR_IRAM_SAFE
    if (callbacks->on_convert_done) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(callbacks->on_convert_done), ESP_ERR_INVALID_ARG, TAG, "on_convert_done callback not in IRAM");
    }
    if (callbacks->on_stop) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(callbacks->on_stop), ESP_ERR_INVALID_ARG, TAG, "on_stop callback not in IRAM");
    }
    if (user_data) {
        ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG, TAG, "user context not in internal RAM");
    }
#endif
    memcpy(&handle->cbs, callbacks, sizeof(dac_event_callbacks_t));
    handle->user_data = user_data;
    return ESP_OK;
}
esp_err_t dac_continuous_enable(dac_continuous_handle_t handle)
{
    DAC_NULL_POINTER_CHECK(handle);
    ESP_RETURN_ON_FALSE(!atomic_load(&handle->is_enabled), ESP_ERR_INVALID_STATE, TAG, "dac continuous has already been enabled");
    esp_err_t ret = ESP_OK;
    /* Reset the descriptor pool */
    xQueueReset(handle->desc_pool);
    for (int i = 0; i < handle->cfg.desc_num; i++) {
        ESP_GOTO_ON_FALSE(xQueueSend(handle->desc_pool, &handle->desc[i], 0) == pdTRUE,
                          ESP_ERR_INVALID_STATE, err, TAG, "the descriptor pool is not cleared");
    }
#if CONFIG_PM_ENABLE
    esp_pm_lock_acquire(handle->pm_lock);
#endif
    for (uint32_t i = 0, mask = handle->cfg.chan_mask; mask; mask >>= 1, i++) {
        if (mask & 0x01) {
            dac_priv_enable_channel(i);
        }
    }
    dac_dma_periph_enable();
    esp_intr_enable(handle->intr_handle);
    DAC_RTC_ENTER_CRITICAL();
    dac_ll_digi_enable_dma(true);
    DAC_RTC_EXIT_CRITICAL();
    atomic_store(&handle->is_enabled, true);
err:
    return ret;
}
esp_err_t dac_continuous_disable(dac_continuous_handle_t handle)
{
    DAC_NULL_POINTER_CHECK(handle);
    ESP_RETURN_ON_FALSE(atomic_load(&handle->is_enabled), ESP_ERR_INVALID_STATE, TAG, "dac continuous has already been disabled");
    atomic_store(&handle->is_enabled, false);
    dac_dma_periph_disable();
    esp_intr_disable(handle->intr_handle);
    DAC_RTC_ENTER_CRITICAL();
    dac_ll_digi_enable_dma(false);
    DAC_RTC_EXIT_CRITICAL();
    atomic_store(&handle->is_running, false);
    for (uint32_t i = 0, mask = handle->cfg.chan_mask; mask; mask >>= 1, i++) {
        if (mask & 0x01) {
            dac_priv_disable_channel(i);
        }
    }
#if CONFIG_PM_ENABLE
    esp_pm_lock_release(handle->pm_lock);
#endif
    return ESP_OK;
}
esp_err_t dac_continuous_start_async_writing(dac_continuous_handle_t handle)
{
    DAC_NULL_POINTER_CHECK(handle);
    ESP_RETURN_ON_FALSE(atomic_load(&handle->is_enabled), ESP_ERR_INVALID_STATE, TAG, "dac continuous has not been enabled");
    ESP_RETURN_ON_FALSE(handle->cbs.on_convert_done, ESP_ERR_INVALID_STATE, TAG,
                        "please register 'on_convert_done' callback before starting asynchronous writing");
    atomic_store(&handle->is_async, true);
    if (atomic_load(&handle->is_cyclic)) {
        /* Break the DMA descriptor chain first to stop the DMA */
        for (int i = 0; i < handle->cfg.desc_num; i++) {
            STAILQ_NEXT(handle->desc[i], qe) = NULL;
        }
    }
    /* Wait for the previous DMA transaction to stop */
    while (atomic_load(&handle->is_running)) {}
    /* Link all the descriptors into a ring */
    for (int i = 0; i < handle->cfg.desc_num; i++) {
        memset(handle->bufs[i], 0, handle->cfg.buf_size);
        STAILQ_NEXT(handle->desc[i], qe) = (i < handle->cfg.desc_num - 1) ? handle->desc[i + 1] : handle->desc[0];
    }
    dac_dma_periph_dma_trans_start((uint32_t)handle->desc[0]);
    atomic_store(&handle->is_running, true);
    return ESP_OK;
}
esp_err_t dac_continuous_stop_async_writing(dac_continuous_handle_t handle)
{
    DAC_NULL_POINTER_CHECK(handle);
    ESP_RETURN_ON_FALSE(atomic_load(&handle->is_async), ESP_ERR_INVALID_STATE, TAG, "dac asynchronous writing has not been started");
    /* Break the DMA descriptor chain first to stop the DMA */
    for (int i = 0; i < handle->cfg.desc_num; i++) {
        STAILQ_NEXT(handle->desc[i], qe) = NULL;
    }
    /* Wait for the previous DMA transaction to stop */
    while (atomic_load(&handle->is_running)) {}
    atomic_store(&handle->is_async, false);
    return ESP_OK;
}
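
/*
 * Typical asynchronous flow (a sketch of the intended call order, derived from the
 * state checks above): register an 'on_convert_done' callback, enable the channels,
 * call dac_continuous_start_async_writing(), refill each finished DMA buffer from the
 * callback via dac_continuous_write_asynchronously(), then stop and disable.
 */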
/* Buffer expansion coefficient: the input data takes twice the space in the DMA buffer when AUTO_16_BIT is enabled */
#if CONFIG_DAC_DMA_AUTO_16BIT_ALIGN
#define DAC_16BIT_ALIGN_COEFF   2
#else
#define DAC_16BIT_ALIGN_COEFF   1
#endif
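
/* With AUTO_16_BIT enabled, each 8-bit sample is written into the high byte of a 16-bit
 * slot (e.g. sample 0x12 occupies the second byte of its slot), since the DAC converts
 * only the high 8 bits of each 16-bit word; the configured 'offset' is added on the way in. */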
static size_t s_dac_load_data_into_buf(dac_continuous_handle_t handle, uint8_t *dest, size_t dest_len, const uint8_t *src, size_t src_len)
{
    size_t load_bytes = 0;
#if CONFIG_DAC_DMA_AUTO_16BIT_ALIGN
    /* Load the data into the high 8 bits of each 16-bit slot */
    load_bytes = (src_len * 2 > dest_len) ? dest_len : src_len * 2;
    for (int i = 0; i < load_bytes; i += 2) {
        dest[i + 1] = src[i / 2] + handle->cfg.offset;
    }
#else
    /* Load the data into the DMA buffer directly */
    load_bytes = (src_len > dest_len) ? dest_len : src_len;
    for (int i = 0; i < load_bytes; i++) {
        dest[i] = src[i] + handle->cfg.offset;
    }
#endif
    return load_bytes;
}
esp_err_t dac_continuous_write_asynchronously(dac_continuous_handle_t handle, uint8_t *dma_buf,
                                              size_t dma_buf_len, const uint8_t *data,
                                              size_t data_len, size_t *bytes_loaded)
{
    DAC_NULL_POINTER_CHECK_ISR(handle);
    DAC_NULL_POINTER_CHECK_ISR(dma_buf);
    DAC_NULL_POINTER_CHECK_ISR(data);
    ESP_RETURN_ON_FALSE_ISR(atomic_load(&handle->is_async), ESP_ERR_INVALID_STATE, TAG, "The asynchronous writing has not started");
    int i;
    for (i = 0; i < handle->cfg.desc_num; i++) {
        if (dma_buf == handle->bufs[i]) {
            break;
        }
    }
    /* Fail if the given address does not belong to any DMA buffer */
    ESP_RETURN_ON_FALSE_ISR(i < handle->cfg.desc_num, ESP_ERR_NOT_FOUND, TAG, "Failed to find the corresponding DMA buffer");
    size_t load_bytes = s_dac_load_data_into_buf(handle, dma_buf, dma_buf_len, data, data_len);
    lldesc_config(handle->desc[i], LLDESC_HW_OWNED, 1, 0, load_bytes);
    if (bytes_loaded) {
        *bytes_loaded = load_bytes / DAC_16BIT_ALIGN_COEFF;
    }
    return ESP_OK;
}
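
/*
 * This function is normally called from the 'on_convert_done' callback to refill the
 * buffer that has just finished converting. A minimal sketch (the callback and data
 * source names are illustrative, not part of this driver):
 *
 *   static bool IRAM_ATTR on_conv_done(dac_continuous_handle_t handle,
 *                                      const dac_event_data_t *event, void *user_data)
 *   {
 *       size_t loaded = 0;
 *       dac_continuous_write_asynchronously(handle, event->buf, event->buf_size,
 *                                           next_chunk, next_chunk_len, &loaded);
 *       return false; // no high-priority task was woken
 *   }
 */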
esp_err_t dac_continuous_write_cyclically(dac_continuous_handle_t handle, uint8_t *buf, size_t buf_size, size_t *bytes_loaded)
{
    DAC_NULL_POINTER_CHECK(handle);
    ESP_RETURN_ON_FALSE(atomic_load(&handle->is_enabled), ESP_ERR_INVALID_STATE, TAG, "This set of DAC channels has not been enabled");
    ESP_RETURN_ON_FALSE(!atomic_load(&handle->is_async), ESP_ERR_INVALID_STATE, TAG, "Asynchronous writing is running, can't write cyclically");
    ESP_RETURN_ON_FALSE(buf_size <= handle->cfg.buf_size * handle->cfg.desc_num, ESP_ERR_INVALID_ARG, TAG,
                        "The cyclic buffer size exceeds the total DMA buffer size: %"PRIu32"(desc_num) * %d(buf_size) = %"PRIu32,
                        handle->cfg.desc_num, handle->cfg.buf_size, handle->cfg.buf_size * handle->cfg.desc_num);
    esp_err_t ret = ESP_OK;
    xSemaphoreTake(handle->mutex, portMAX_DELAY);
    if (atomic_load(&handle->is_cyclic)) {
        /* Break the DMA descriptor chain first to stop the DMA */
        for (int i = 0; i < handle->cfg.desc_num; i++) {
            STAILQ_NEXT(handle->desc[i], qe) = NULL;
        }
    }
    /* Wait for the previous DMA transaction to stop */
    while (atomic_load(&handle->is_running)) {}
    atomic_store(&handle->is_cyclic, true);
    size_t src_buf_size = buf_size;
    uint32_t split = 1;
    int i;
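    /* Spread the data evenly: once less than two descriptors' worth of data remains,
     * 'split' alternates 2 -> 1 so the tail is halved across the last two descriptors.
     * E.g. (hypothetical numbers) with 2048-byte buffers and 3000 bytes left, the last
     * two descriptors carry 1500 + 1500 instead of 2048 + 952. */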
    for (i = 0; i < handle->cfg.desc_num && buf_size > 0; i++) {
        /* Halve the tail across the last two descriptors, as described above */
        split = (buf_size * DAC_16BIT_ALIGN_COEFF < handle->cfg.buf_size * 2) ? 3 - split : 1;
        size_t load_bytes = s_dac_load_data_into_buf(handle, handle->bufs[i], handle->cfg.buf_size, buf, buf_size / split);
        lldesc_config(handle->desc[i], LLDESC_HW_OWNED, 1, 0, load_bytes);
        /* Link to the next descriptor */
        STAILQ_NEXT(handle->desc[i], qe) = (i < handle->cfg.desc_num - 1) ? handle->desc[i + 1] : NULL;
        buf_size -= load_bytes / DAC_16BIT_ALIGN_COEFF;
        buf += load_bytes / DAC_16BIT_ALIGN_COEFF;
    }
    /* Link the tail back to the head to form a ring */
    STAILQ_NEXT(handle->desc[i - 1], qe) = handle->desc[0];
    dac_dma_periph_dma_trans_start((uint32_t)handle->desc[0]);
    atomic_store(&handle->is_running, true);
    if (bytes_loaded) {
        *bytes_loaded = src_buf_size - buf_size;
    }
    xSemaphoreGive(handle->mutex);
    return ret;
}
static esp_err_t s_dac_wait_to_load_dma_data(dac_continuous_handle_t handle, uint8_t *buf, size_t buf_size, size_t *w_size, TickType_t timeout_tick)
{
    lldesc_t *desc;
    /* Try to get a free descriptor from the pool */
    ESP_RETURN_ON_FALSE(xQueueReceive(handle->desc_pool, &desc, timeout_tick) == pdTRUE,
                        ESP_ERR_TIMEOUT, TAG, "Get available descriptor timeout");
    /* Make sure the descriptor is not still in the pending chain */
    if (STAILQ_FIRST(&handle->head) != NULL) {
        DAC_STAILQ_REMOVE(&handle->head, desc, lldesc_s, qe);
    }
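    /* When less than two full buffers of data remain, alternately halve the load so the
     * tail is spread across two descriptors; 'split_flag' is static so the halving state
     * survives across the successive calls made for one write operation */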
    static bool split_flag = false;
    uint8_t *dma_buf = (uint8_t *)desc->buf;
    if (buf_size * DAC_16BIT_ALIGN_COEFF < 2 * handle->cfg.buf_size) {
        if (!split_flag) {
            buf_size >>= 1;
            split_flag = true;
        } else {
            split_flag = false;
        }
    }
    size_t load_bytes = s_dac_load_data_into_buf(handle, dma_buf, handle->cfg.buf_size, buf, buf_size);
    lldesc_config(desc, LLDESC_HW_OWNED, 1, 0, load_bytes);
    desc->size = load_bytes;
    *w_size = load_bytes / DAC_16BIT_ALIGN_COEFF;
    /* Insert the loaded descriptor at the end of the chain, where it waits to be sent */
    DESC_ENTER_CRITICAL();
    STAILQ_INSERT_TAIL(&handle->head, desc, qe);
    DESC_EXIT_CRITICAL();
    return ESP_OK;
}
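
/*
 * Synchronous write data path (as implemented above and in the ISR): dac_continuous_write()
 * takes free descriptors from 'desc_pool', fills them, and appends them to the pending
 * chain 'head'; the DMA consumes the chain, and on each EOF interrupt the finished
 * descriptor is unlinked and returned to 'desc_pool' for reuse.
 */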
esp_err_t dac_continuous_write(dac_continuous_handle_t handle, uint8_t *buf, size_t buf_size, size_t *bytes_loaded, int timeout_ms)
{
    DAC_NULL_POINTER_CHECK(handle);
    DAC_NULL_POINTER_CHECK(buf);
    ESP_RETURN_ON_FALSE(atomic_load(&handle->is_enabled), ESP_ERR_INVALID_STATE, TAG, "This set of DAC channels has not been enabled");
    ESP_RETURN_ON_FALSE(!atomic_load(&handle->is_async), ESP_ERR_INVALID_STATE, TAG, "Asynchronous writing is running, can't write synchronously");
    esp_err_t ret = ESP_OK;
    TickType_t timeout_tick = timeout_ms < 0 ? portMAX_DELAY : pdMS_TO_TICKS(timeout_ms);
    ESP_RETURN_ON_FALSE(xSemaphoreTake(handle->mutex, timeout_tick) == pdTRUE, ESP_ERR_TIMEOUT, TAG, "Take semaphore timeout");
    size_t w_size = 0;
    size_t src_buf_size = buf_size;
    /* Reset the descriptor pool and the chain if the cyclic function was called last time */
    if (atomic_load(&handle->is_cyclic)) {
        xQueueReset(handle->desc_pool);
        /* Break the chain in case the DMA is still running */
        for (int i = 0; i < handle->cfg.desc_num; i++) {
            STAILQ_NEXT(handle->desc[i], qe) = NULL;
            xQueueSend(handle->desc_pool, &handle->desc[i], 0);
        }
        STAILQ_INIT(&handle->head);
        atomic_store(&handle->is_cyclic, false);
    }
    /* If there is no descriptor in the chain, the DMA has stopped; load the data and start the DMA link */
    if (STAILQ_FIRST(&handle->head) == NULL) {
        /* Wait for the previous DMA transaction to stop */
        while (atomic_load(&handle->is_running)) {}
        for (int i = 0;
             i < handle->cfg.desc_num && buf_size > 0;
             i++, buf += w_size, buf_size -= w_size) {
            ESP_GOTO_ON_ERROR(s_dac_wait_to_load_dma_data(handle, buf, buf_size, &w_size, timeout_tick), err, TAG, "Load data failed");
        }
        dac_dma_periph_dma_trans_start((uint32_t)(STAILQ_FIRST(&handle->head)));
        atomic_store(&handle->is_running, true);
    }
    /* If the source buffer has not been fully loaded, keep loading the remaining data */
    while (buf_size > 0) {
        ESP_GOTO_ON_ERROR(s_dac_wait_to_load_dma_data(handle, buf, buf_size, &w_size, timeout_tick), err, TAG, "Load data failed");
        /* If the DMA stopped while some descriptors are still unsent, start the DMA again */
        DESC_ENTER_CRITICAL();
        if (STAILQ_FIRST(&handle->head) && !atomic_load(&handle->is_running)) {
            dac_dma_periph_dma_trans_start((uint32_t)(STAILQ_FIRST(&handle->head)));
            atomic_store(&handle->is_running, true);
        }
        DESC_EXIT_CRITICAL();
        buf += w_size;
        buf_size -= w_size;
    }
err:
    /* Report the number of bytes that have been loaded */
    if (bytes_loaded) {
        *bytes_loaded = src_buf_size - buf_size;
    }
    xSemaphoreGive(handle->mutex);
    return ret;
}