esp_async_memcpy.c

// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "hal/dma_types.h"
#include "esp_compiler.h"
#include "esp_heap_caps.h"
#include "esp_log.h"
#include "esp_async_memcpy.h"
#include "esp_async_memcpy_impl.h"

static const char *TAG = "async_memcpy";

#define ASMCP_CHECK(a, msg, tag, ret, ...)                                            \
    do                                                                                \
    {                                                                                 \
        if (unlikely(!(a)))                                                           \
        {                                                                             \
            ESP_LOGE(TAG, "%s(%d): " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__);     \
            ret_code = ret;                                                           \
            goto tag;                                                                 \
        }                                                                             \
    } while (0)

/**
 * @brief Type of async mcp stream
 *
 * An mcp stream embeds a DMA descriptor and, in addition, carries a user callback and its argument.
 */
typedef struct {
    dma_descriptor_t desc;
    async_memcpy_isr_cb_t cb;
    void *cb_args;
} async_memcpy_stream_t;

/**
 * @brief Type of async mcp driver context
 */
typedef struct async_memcpy_context_t {
    async_memcpy_impl_t mcp_impl;            // implementation layer
    portMUX_TYPE spinlock;                   // spinlock, prevents operating on descriptors concurrently
    intr_handle_t intr_hdl;                  // interrupt handle
    uint32_t flags;                          // extra driver flags
    dma_descriptor_t *tx_desc;               // pointer to the next free TX descriptor
    dma_descriptor_t *rx_desc;               // pointer to the next free RX descriptor
    dma_descriptor_t *next_rx_desc_to_check; // pointer to the next RX descriptor to recycle
    uint32_t max_stream_num;                 // maximum number of streams
    async_memcpy_stream_t *out_streams;      // pointer to the first TX stream
    async_memcpy_stream_t *in_streams;       // pointer to the first RX stream
    async_memcpy_stream_t streams_pool[0];   // stream pool (TX + RX), the size is configured during driver installation
} async_memcpy_context_t;

esp_err_t esp_async_memcpy_install(const async_memcpy_config_t *config, async_memcpy_t *asmcp)
{
    esp_err_t ret_code = ESP_OK;
    async_memcpy_context_t *mcp_hdl = NULL;

    ASMCP_CHECK(config, "configuration can't be null", err, ESP_ERR_INVALID_ARG);
    ASMCP_CHECK(asmcp, "can't assign mcp handle to null", err, ESP_ERR_INVALID_ARG);

    // context memory size + stream pool size
    size_t total_malloc_size = sizeof(async_memcpy_context_t) + sizeof(async_memcpy_stream_t) * config->backlog * 2;
    // to keep working when the cache is disabled, the driver handle should be located in internal SRAM
    mcp_hdl = heap_caps_calloc(1, total_malloc_size, MALLOC_CAP_8BIT | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
    ASMCP_CHECK(mcp_hdl, "allocate context memory failed", err, ESP_ERR_NO_MEM);

    mcp_hdl->flags = config->flags;
    mcp_hdl->out_streams = mcp_hdl->streams_pool;
    mcp_hdl->in_streams = mcp_hdl->streams_pool + config->backlog;
    mcp_hdl->max_stream_num = config->backlog;

    // link the TX/RX descriptors into circular lists
    for (size_t i = 0; i < mcp_hdl->max_stream_num; i++) {
        mcp_hdl->out_streams[i].desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
        mcp_hdl->out_streams[i].desc.next = &mcp_hdl->out_streams[i + 1].desc;
        mcp_hdl->in_streams[i].desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
        mcp_hdl->in_streams[i].desc.next = &mcp_hdl->in_streams[i + 1].desc;
    }
    mcp_hdl->out_streams[mcp_hdl->max_stream_num - 1].desc.next = &mcp_hdl->out_streams[0].desc;
    mcp_hdl->in_streams[mcp_hdl->max_stream_num - 1].desc.next = &mcp_hdl->in_streams[0].desc;
    mcp_hdl->tx_desc = &mcp_hdl->out_streams[0].desc;
    mcp_hdl->rx_desc = &mcp_hdl->in_streams[0].desc;
    mcp_hdl->next_rx_desc_to_check = &mcp_hdl->in_streams[0].desc;
    mcp_hdl->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;

    // initialize the implementation layer
    async_memcpy_impl_init(&mcp_hdl->mcp_impl);
    *asmcp = mcp_hdl;

    async_memcpy_impl_start(&mcp_hdl->mcp_impl, (intptr_t)&mcp_hdl->out_streams[0].desc, (intptr_t)&mcp_hdl->in_streams[0].desc);
    return ESP_OK;

err:
    if (mcp_hdl) {
        free(mcp_hdl);
    }
    if (asmcp) {
        *asmcp = NULL;
    }
    return ret_code;
}

esp_err_t esp_async_memcpy_uninstall(async_memcpy_t asmcp)
{
    esp_err_t ret_code = ESP_OK;
    ASMCP_CHECK(asmcp, "mcp handle can't be null", err, ESP_ERR_INVALID_ARG);

    async_memcpy_impl_stop(&asmcp->mcp_impl);
    async_memcpy_impl_deinit(&asmcp->mcp_impl);
    free(asmcp);
    return ESP_OK;
err:
    return ret_code;
}

static int async_memcpy_prepare_receive(async_memcpy_t asmcp, void *buffer, size_t size, dma_descriptor_t **start_desc, dma_descriptor_t **end_desc)
{
    uint32_t prepared_length = 0;
    uint8_t *buf = (uint8_t *)buffer;
    dma_descriptor_t *desc = asmcp->rx_desc; // descriptor iterator
    dma_descriptor_t *start = desc;
    dma_descriptor_t *end = desc;

    while (size > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
        if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
            desc->dw0.suc_eof = 0;
            desc->dw0.size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            desc->buffer = &buf[prepared_length];
            desc = desc->next; // move to the next descriptor
            prepared_length += DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            size -= DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
        } else {
            // out of RX descriptors
            goto _exit;
        }
    }
    if (size) {
        if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
            end = desc; // the last descriptor used
            desc->dw0.suc_eof = 0;
            desc->dw0.size = size;
            desc->buffer = &buf[prepared_length];
            desc = desc->next; // move to the next descriptor
            prepared_length += size;
        } else {
            // out of RX descriptors
            goto _exit;
        }
    }
_exit:
    *start_desc = start;
    *end_desc = end;
    return prepared_length;
}

static int async_memcpy_prepare_transmit(async_memcpy_t asmcp, void *buffer, size_t len, dma_descriptor_t **start_desc, dma_descriptor_t **end_desc)
{
    uint32_t prepared_length = 0;
    uint8_t *buf = (uint8_t *)buffer;
    dma_descriptor_t *desc = asmcp->tx_desc; // descriptor iterator
    dma_descriptor_t *start = desc;
    dma_descriptor_t *end = desc;

    while (len > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
        if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
            desc->dw0.suc_eof = 0; // not the end of the transaction
            desc->dw0.size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            desc->dw0.length = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            desc->buffer = &buf[prepared_length];
            desc = desc->next; // move to the next descriptor
            prepared_length += DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
            len -= DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
        } else {
            // out of TX descriptors
            goto _exit;
        }
    }
    if (len) {
        if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
            end = desc; // the last descriptor used
            desc->dw0.suc_eof = 1; // end of the transaction
            desc->dw0.size = len;
            desc->dw0.length = len;
            desc->buffer = &buf[prepared_length];
            desc = desc->next; // move to the next descriptor
            prepared_length += len;
        } else {
            // out of TX descriptors
            goto _exit;
        }
    }
    *start_desc = start;
    *end_desc = end;
_exit:
    return prepared_length;
}

static bool async_memcpy_get_next_rx_descriptor(async_memcpy_t asmcp, dma_descriptor_t *eof_desc, dma_descriptor_t **next_desc)
{
    dma_descriptor_t *next = asmcp->next_rx_desc_to_check;
    // additional check, to avoid acting on an interrupt that was triggered by mistake
    if (next->dw0.owner == DMA_DESCRIPTOR_BUFFER_OWNER_CPU) {
        asmcp->next_rx_desc_to_check = asmcp->next_rx_desc_to_check->next;
        *next_desc = next;
        // return whether the caller needs to keep checking
        return eof_desc == next ? false : true;
    }
    *next_desc = NULL;
    return false;
}

esp_err_t esp_async_memcpy(async_memcpy_t asmcp, void *dst, void *src, size_t n, async_memcpy_isr_cb_t cb_isr, void *cb_args)
{
    esp_err_t ret_code = ESP_OK;
    dma_descriptor_t *rx_start_desc = NULL;
    dma_descriptor_t *rx_end_desc = NULL;
    dma_descriptor_t *tx_start_desc = NULL;
    dma_descriptor_t *tx_end_desc = NULL;
    size_t rx_prepared_size = 0;
    size_t tx_prepared_size = 0;
    ASMCP_CHECK(asmcp, "mcp handle can't be null", err, ESP_ERR_INVALID_ARG);
    ASMCP_CHECK(async_memcpy_impl_is_buffer_address_valid(&asmcp->mcp_impl, src, dst), "buffer address not valid", err, ESP_ERR_INVALID_ARG);
    ASMCP_CHECK(n <= DMA_DESCRIPTOR_BUFFER_MAX_SIZE * asmcp->max_stream_num, "buffer size too large", err, ESP_ERR_INVALID_ARG);

    // prepare the TX and RX descriptors
    portENTER_CRITICAL_SAFE(&asmcp->spinlock);
    rx_prepared_size = async_memcpy_prepare_receive(asmcp, dst, n, &rx_start_desc, &rx_end_desc);
    tx_prepared_size = async_memcpy_prepare_transmit(asmcp, src, n, &tx_start_desc, &tx_end_desc);
    if ((rx_prepared_size == n) && (tx_prepared_size == n)) {
        // register the user callback on the last RX descriptor
        async_memcpy_stream_t *mcp_stream = __containerof(rx_end_desc, async_memcpy_stream_t, desc);
        mcp_stream->cb = cb_isr;
        mcp_stream->cb_args = cb_args;
        // restart RX first
        dma_descriptor_t *desc = rx_start_desc;
        while (desc != rx_end_desc) {
            desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
            desc = desc->next;
        }
        desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
        asmcp->rx_desc = desc->next;
        // then restart TX
        desc = tx_start_desc;
        while (desc != tx_end_desc) {
            desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
            desc = desc->next;
        }
        desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
        asmcp->tx_desc = desc->next;
        async_memcpy_impl_restart(&asmcp->mcp_impl);
    }
    portEXIT_CRITICAL_SAFE(&asmcp->spinlock);

    // It's unlikely that there is room for the RX descriptors but not for the TX descriptors,
    // because TX and RX descriptors are consumed at the same pace
    ASMCP_CHECK(rx_prepared_size == n, "out of rx descriptor", err, ESP_FAIL);
    ASMCP_CHECK(tx_prepared_size == n, "out of tx descriptor", err, ESP_FAIL);
    return ESP_OK;
err:
    return ret_code;
}
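
/*
 * Illustrative usage sketch (not compiled as part of this file): it shows how an
 * application might install the driver, start one asynchronous copy with the
 * function above, and block on a semaphore until the ISR callback fires.
 * ASYNC_MEMCPY_DEFAULT_CONFIG() and the exact callback signature are assumed to
 * match esp_async_memcpy.h; the helper names and buffer sizes below are made up
 * for the example only.
 */
#if 0
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_heap_caps.h"
#include "esp_async_memcpy.h"

// runs in ISR context: give the semaphore and report whether a context switch is needed
static IRAM_ATTR bool example_memcpy_cb(async_memcpy_t mcp, async_memcpy_event_t *event, void *cb_args)
{
    SemaphoreHandle_t done = (SemaphoreHandle_t)cb_args;
    BaseType_t high_task_wakeup = pdFALSE;
    xSemaphoreGiveFromISR(done, &high_task_wakeup);
    return high_task_wakeup == pdTRUE;
}

static void example_async_copy(void)
{
    async_memcpy_config_t config = ASYNC_MEMCPY_DEFAULT_CONFIG();
    config.backlog = 8; // number of streams (DMA descriptors) per direction
    async_memcpy_t driver = NULL;
    ESP_ERROR_CHECK(esp_async_memcpy_install(&config, &driver));

    // buffers must be DMA capable, see async_memcpy_impl_is_buffer_address_valid()
    uint8_t *src = heap_caps_malloc(4096, MALLOC_CAP_DMA);
    uint8_t *dst = heap_caps_malloc(4096, MALLOC_CAP_DMA);
    SemaphoreHandle_t done = xSemaphoreCreateBinary();

    ESP_ERROR_CHECK(esp_async_memcpy(driver, dst, src, 4096, example_memcpy_cb, done));
    xSemaphoreTake(done, portMAX_DELAY); // wait for the copy to complete

    vSemaphoreDelete(done);
    heap_caps_free(src);
    heap_caps_free(dst);
    ESP_ERROR_CHECK(esp_async_memcpy_uninstall(driver));
}
#endif
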
IRAM_ATTR void async_memcpy_isr_on_rx_done_event(async_memcpy_impl_t *impl)
{
    bool to_continue = false;
    async_memcpy_stream_t *in_stream = NULL;
    dma_descriptor_t *next_desc = NULL;
    async_memcpy_context_t *asmcp = __containerof(impl, async_memcpy_context_t, mcp_impl);

    // get the RX EOF descriptor address
    dma_descriptor_t *eof = (dma_descriptor_t *)impl->rx_eof_addr;
    // traverse all unchecked descriptors
    do {
        portENTER_CRITICAL_ISR(&asmcp->spinlock);
        // The RX descriptors are assumed to be consumed at the same pace as the TX descriptors
        // (this is determined by the M2M DMA working mechanism), so once an RX descriptor is
        // recycled, the corresponding TX descriptor is guaranteed to have been returned by the DMA.
        to_continue = async_memcpy_get_next_rx_descriptor(asmcp, eof, &next_desc);
        portEXIT_CRITICAL_ISR(&asmcp->spinlock);
        if (next_desc) {
            in_stream = __containerof(next_desc, async_memcpy_stream_t, desc);
            // invoke the user-registered callback if available
            if (in_stream->cb) {
                async_memcpy_event_t e = {0};
                if (in_stream->cb(asmcp, &e, in_stream->cb_args)) {
                    impl->isr_need_yield = true;
                }
                in_stream->cb = NULL;
                in_stream->cb_args = NULL;
            }
        }
    } while (to_continue);
}