/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

// The HAL layer for SDIO slave (common part)

#include <string.h>
#include <inttypes.h>
#include "soc/slc_struct.h"
#include "soc/hinf_struct.h"
#include "hal/sdio_slave_types.h"
#include "soc/host_struct.h"
#include "hal/sdio_slave_hal.h"
#include "hal/assert.h"
#include "hal/log.h"
#include "esp_attr.h"

#define SDIO_SLAVE_CHECK(res, str, ret_val) do { if(!(res)){\
        HAL_LOGE(TAG, "%s", str);\
        return ret_val;\
    } }while (0)

/* The tag may be unused if log level is set to NONE */
static const __attribute__((unused)) char TAG[] = "SDIO_HAL";

static esp_err_t init_send_queue(sdio_slave_context_t *hal);

/**************** Ring buffer for SDIO sending use *****************/
typedef enum {
    RINGBUF_GET_ONE = 0,
    RINGBUF_GET_ALL = 1,
} ringbuf_get_all_t;

typedef enum {
    RINGBUF_WRITE_PTR,
    RINGBUF_READ_PTR,
    RINGBUF_FREE_PTR,
} sdio_ringbuf_pointer_t;

static esp_err_t sdio_ringbuf_send(sdio_ringbuf_t *buf, esp_err_t (*copy_callback)(uint8_t *, void *), void *arg);
static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t *buf, sdio_slave_hal_send_desc_t **start, sdio_slave_hal_send_desc_t **end, ringbuf_get_all_t get_all);
static inline int sdio_ringbuf_return(sdio_ringbuf_t* buf, uint8_t *ptr);

#define _SEND_DESC_NEXT(x)    STAILQ_NEXT(&((sdio_slave_hal_send_desc_t*)x)->dma_desc, qe)
#define SEND_DESC_NEXT(x)     (sdio_slave_hal_send_desc_t*)_SEND_DESC_NEXT(x)
#define SEND_DESC_NEXT_SET(x, target)    do { \
        _SEND_DESC_NEXT(x) = (sdio_slave_ll_desc_t*)target; \
    } while(0)

static esp_err_t link_desc_to_last(uint8_t* desc, void* arg)
{
    SEND_DESC_NEXT_SET(arg, desc);
    return ESP_OK;
}

//calculate a pointer with an offset from one of the stored pointers (write/read/free) of the given ringbuffer
static inline uint8_t* sdio_ringbuf_offset_ptr(sdio_ringbuf_t *buf, sdio_ringbuf_pointer_t ptr, uint32_t offset)
{
    uint8_t *buf_ptr;
    switch (ptr) {
    case RINGBUF_WRITE_PTR:
        buf_ptr = buf->write_ptr;
        break;
    case RINGBUF_READ_PTR:
        buf_ptr = buf->read_ptr;
        break;
    case RINGBUF_FREE_PTR:
        buf_ptr = buf->free_ptr;
        break;
    default:
        abort();
    }
    uint8_t *offset_ptr = buf_ptr + offset;
    if (offset_ptr >= buf->data + buf->size) {
        offset_ptr -= buf->size;
    }
    return offset_ptr;
}

static esp_err_t sdio_ringbuf_send(sdio_ringbuf_t *buf, esp_err_t (*copy_callback)(uint8_t *, void *), void *arg)
{
    uint8_t* get_ptr = sdio_ringbuf_offset_ptr(buf, RINGBUF_WRITE_PTR, SDIO_SLAVE_SEND_DESC_SIZE);
    esp_err_t err = ESP_OK;
    if (copy_callback) {
        err = (*copy_callback)(get_ptr, arg);
    }
    if (err != ESP_OK) return err;

    buf->write_ptr = get_ptr;
    return ESP_OK;
}

// This ringbuf uses a return-before-recv-again strategy: a received descriptor must be
// returned before the next recv. Since it is designed to be called in the ISR, there is
// no protection against parallel access. (A usage sketch follows at the end of this section.)
static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t *buf, sdio_slave_hal_send_desc_t **start, sdio_slave_hal_send_desc_t **end, ringbuf_get_all_t get_all)
{
    HAL_ASSERT(buf->free_ptr == buf->read_ptr); //must return before recv again
    if (start == NULL && end == NULL) return ESP_ERR_INVALID_ARG; // must have an output
    if (buf->read_ptr == buf->write_ptr) return ESP_ERR_NOT_FOUND; // no data

    uint8_t *get_start = sdio_ringbuf_offset_ptr(buf, RINGBUF_READ_PTR, SDIO_SLAVE_SEND_DESC_SIZE);

    if (get_all != RINGBUF_GET_ONE) {
        buf->read_ptr = buf->write_ptr;
    } else {
        buf->read_ptr = get_start;
    }

    if (start != NULL) {
        *start = (sdio_slave_hal_send_desc_t *) get_start;
    }
    if (end != NULL) {
        *end = (sdio_slave_hal_send_desc_t *) buf->read_ptr;
    }
    return ESP_OK;
}

static inline int sdio_ringbuf_return(sdio_ringbuf_t* buf, uint8_t *ptr)
{
    HAL_ASSERT(sdio_ringbuf_offset_ptr(buf, RINGBUF_FREE_PTR, SDIO_SLAVE_SEND_DESC_SIZE) == ptr);
    size_t size = (buf->read_ptr + buf->size - buf->free_ptr) % buf->size;
    size_t count = size / SDIO_SLAVE_SEND_DESC_SIZE;
    HAL_ASSERT(count * SDIO_SLAVE_SEND_DESC_SIZE == size);
    buf->free_ptr = buf->read_ptr;
    return count;
}

static inline uint8_t* sdio_ringbuf_peek_front(sdio_ringbuf_t* buf)
{
    if (buf->read_ptr != buf->write_ptr) {
        return sdio_ringbuf_offset_ptr(buf, RINGBUF_READ_PTR, SDIO_SLAVE_SEND_DESC_SIZE);
    } else {
        return NULL;
    }
}

static inline uint8_t* sdio_ringbuf_peek_rear(sdio_ringbuf_t *buf)
{
    return buf->write_ptr;
}

static inline bool sdio_ringbuf_empty(sdio_ringbuf_t* buf)
{
    return (buf->read_ptr == buf->write_ptr);
}
/**************** End of Ring buffer *****************/
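
/*
 * A minimal sketch (not part of the HAL) of the intended life-cycle of one
 * descriptor slot in the ring above: produce with ``sdio_ringbuf_send()``,
 * consume with ``sdio_ringbuf_recv()``, and hand the slot back with
 * ``sdio_ringbuf_return()`` before the next recv. ``example_copy_cb`` and
 * ``example_cycle`` are hypothetical names for illustration only.
 */
#if 0 //illustrative sketch, not compiled
static esp_err_t example_copy_cb(uint8_t *slot, void *arg)
{
    //a real callback must preserve the STAILQ_NEXT link, as send_write_desc() below does
    sdio_slave_hal_send_desc_t *next = SEND_DESC_NEXT(slot);
    memcpy(slot, arg, sizeof(sdio_slave_hal_send_desc_t));
    SEND_DESC_NEXT_SET(slot, next);
    return ESP_OK;
}

static void example_cycle(sdio_ringbuf_t *rb, sdio_slave_hal_send_desc_t *prepared)
{
    sdio_slave_hal_send_desc_t *start = NULL, *end = NULL;
    //produce: advances write_ptr by one SDIO_SLAVE_SEND_DESC_SIZE step
    sdio_ringbuf_send(rb, example_copy_cb, prepared);
    //consume one descriptor (typically in the ISR); read_ptr moves forward
    if (sdio_ringbuf_recv(rb, &start, &end, RINGBUF_GET_ONE) == ESP_OK) {
        //... the hardware uses the descriptor here ...
        //return: free_ptr catches up with read_ptr; mandatory before the next recv
        sdio_ringbuf_return(rb, (uint8_t *)start);
    }
}
#endif
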
void sdio_slave_hal_init(sdio_slave_context_t *hal)
{
    hal->host = sdio_slave_ll_get_host(0);
    hal->slc = sdio_slave_ll_get_slc(0);
    hal->hinf = sdio_slave_ll_get_hinf(0);
    hal->send_state = STATE_IDLE;
    hal->recv_link_list = (sdio_slave_hal_recv_stailq_t)STAILQ_HEAD_INITIALIZER(hal->recv_link_list);

    init_send_queue(hal);
}

void sdio_slave_hal_hw_init(sdio_slave_context_t *hal)
{
    sdio_slave_ll_init(hal->slc);
    sdio_slave_ll_enable_hs(hal->hinf, !hal->no_highspeed);
    sdio_slave_ll_set_timing(hal->host, hal->timing);
    sdio_slave_ll_slvint_t intr_ena = 0xff;
    sdio_slave_ll_slvint_set_ena(hal->slc, &intr_ena);
}

static esp_err_t init_send_queue(sdio_slave_context_t *hal)
{
    esp_err_t ret;
    esp_err_t rcv_res __attribute((unused));
    sdio_ringbuf_t *buf = &(hal->send_desc_queue);

    //initialize pointers
    buf->write_ptr = buf->data;
    buf->read_ptr = buf->data;
    buf->free_ptr = buf->data;

    sdio_slave_hal_send_desc_t *first = NULL, *last = NULL;
    //no copy for the first descriptor
    ret = sdio_ringbuf_send(buf, NULL, NULL);
    if (ret != ESP_OK) return ret;

    //loop over the ringbuf to link all the descriptors one after another into a ring
    for (int i = 0; i < hal->send_queue_size + 1; i++) {
        rcv_res = sdio_ringbuf_recv(buf, &last, NULL, RINGBUF_GET_ONE);
        assert(rcv_res == ESP_OK);
        ret = sdio_ringbuf_send(buf, link_desc_to_last, last);
        if (ret != ESP_OK) return ret;
        sdio_ringbuf_return(buf, (uint8_t *) last);
    }

    first = NULL;
    last = NULL;
    //clear the queue
    rcv_res = sdio_ringbuf_recv(buf, &first, &last, RINGBUF_GET_ALL);
    assert(rcv_res == ESP_OK);
    HAL_ASSERT(first == last); //there should be only one desc remaining
    sdio_ringbuf_return(buf, (uint8_t *) first);
    return ESP_OK;
}

void sdio_slave_hal_set_ioready(sdio_slave_context_t *hal, bool ready)
{
    sdio_slave_ll_set_ioready(hal->hinf, ready); //set IO ready to 1 to allow the host to use the slave
}

/*---------------------------------------------------------------------------
 *                  Send
 *
 * The hardware has a cache, so once a descriptor is loaded onto the linked list, it cannot be modified
 * until it is returned (used) by the hardware. This forbids us from loading descriptors onto the linked list
 * during a transfer (or while waiting for the host to start one). However, we use a "ringbuffer" (different
 * from the one in the ``freertos/`` folder) holding descriptors to solve this:
 * 1. The driver allocates continuous memory for several buffer descriptors (the maximum buffer number) during
 *    initialization. It then points the STAILQ_NEXT pointer of every descriptor except the last one to the
 *    descriptor after it, and points the STAILQ_NEXT pointer of the last descriptor back to the first one:
 *    the descriptors now form a ring.
 * 2. The "ringbuffer" has a write pointer pointing to where the app can write a new descriptor. The app writes
 *    the new descriptor indicated by the write pointer without touching the STAILQ_NEXT pointer, so the
 *    descriptors always stay in a ring-like linked list. The app never touches the part of the linked list
 *    being used by the hardware.
 * 3. When the hardware needs some data to send, it automatically picks a part of the linked descriptors.
 *    According to the mode:
 *    - Buffer mode: picks only the descriptor following the last one sent;
 *    - Stream mode: picks the whole unsent linked list, starting from the descriptor above, up to the latest
 *      linked one.
 *    The driver clears the STAILQ_NEXT pointer of the last descriptor and hands the head of this part to the
 *    DMA controller, so that it looks like a linear linked list rather than a ring to the hardware.
 * 4. The counter of the sending FIFO increases when the app loads new buffers (in STREAM_MODE) or when a new
 *    transfer should start (in PACKET_MODE).
 * 5. When a sending transfer is finished, the driver walks through the descriptors just sent in the ISR and
 *    pushes the ``arg`` member of each descriptor back to the app through the queue, so that the app can
 *    handle finished buffers. The driver also restores the STAILQ_NEXT pointer of the last descriptor so that
 *    the descriptors form a ring again.
 * A sketch of a typical ISR flow built on this scheme follows this comment block.
 ----------------------------------------------------------------------------*/
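
/*
 * A minimal sketch (not part of the HAL) of how a driver-level send ISR might
 * drive the scheme described above; ``deliver_finished_arg_to_app()`` is a
 * hypothetical hand-off to the app (e.g. pushing to a FreeRTOS queue):
 */
#if 0 //illustrative sketch, not compiled
static void example_send_isr(sdio_slave_context_t *hal)
{
    if (sdio_slave_hal_send_eof_happened(hal)) {
        void *finished_arg = NULL;
        uint32_t returned_cnt = 0;
        //step 5: walk the descriptors just sent and give their ``arg`` back to the app
        while (sdio_slave_hal_send_get_next_finished_arg(hal, &finished_arg, &returned_cnt) == ESP_OK) {
            deliver_finished_arg_to_app(finished_arg);
        }
    }
    //steps 3-4: link the next pending packet (if any) onto the DMA
    sdio_slave_hal_send_new_packet_if_exist(hal);
}
#endif
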
static inline void send_set_state(sdio_slave_context_t *hal, send_state_t state)
{
    hal->send_state = state;
}

static inline send_state_t send_get_state(sdio_slave_context_t* hal)
{
    return hal->send_state;
}

DMA_ATTR static const sdio_slave_ll_desc_t start_desc = {
    .owner = 1,
    .buf = (void*)0x3ffbbbbb, //assign a dma-capable pointer other than NULL, which will not be used
    .size = 1,
    .length = 1,
    .eof = 1,
};

//force trigger the rx_done interrupt. The interrupt status is raised here once and never cleared,
//so the app can later invoke the ISR simply by setting the enable bit.
static void send_isr_invoker_enable(const sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_reset(hal->slc);
    sdio_slave_ll_send_start(hal->slc, &start_desc);
    //wait for rx_done
    while (!sdio_slave_ll_send_invoker_ready(hal->slc));
    sdio_slave_ll_send_stop(hal->slc);
    sdio_slave_ll_send_hostint_clr(hal->host);
}

static void send_isr_invoker_disable(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_part_done_clear(hal->slc);
}

void sdio_slave_hal_send_handle_isr_invoke(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_part_done_intr_ena(hal->slc, false);
}

//start hw operation with existing data (if any)
esp_err_t sdio_slave_hal_send_start(sdio_slave_context_t *hal)
{
    SDIO_SLAVE_CHECK(send_get_state(hal) == STATE_IDLE,
                     "already started", ESP_ERR_INVALID_STATE);
    send_set_state(hal, STATE_WAIT_FOR_START);
    send_isr_invoker_enable(hal);
    sdio_slave_ll_send_intr_clr(hal->slc);
    sdio_slave_ll_send_intr_ena(hal->slc, true);
    return ESP_OK;
}

//only stops hw operations; neither the data nor the counter is touched
void sdio_slave_hal_send_stop(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_stop(hal->slc);
    send_isr_invoker_disable(hal);
    sdio_slave_ll_send_intr_ena(hal->slc, false);
    send_set_state(hal, STATE_IDLE);
}

static void send_new_packet(sdio_slave_context_t *hal)
{
    // since eof is changed, we have to stop and reset the linked list,
    // then restart a new linked-list operation
    sdio_slave_hal_send_desc_t *const start_desc = hal->in_flight_head;
    sdio_slave_hal_send_desc_t *const end_desc = hal->in_flight_end;
    HAL_ASSERT(start_desc != NULL && end_desc != NULL);

    sdio_slave_ll_send_stop(hal->slc);
    sdio_slave_ll_send_reset(hal->slc);
    sdio_slave_ll_send_start(hal->slc, (sdio_slave_ll_desc_t*)start_desc);

    // update the pkt_len register to allow the host to read the new length
    sdio_slave_ll_send_write_len(hal->slc, end_desc->pkt_len);
    HAL_EARLY_LOGV(TAG, "send_length_write: %"PRIu32", last_len: %08"PRIX32"", end_desc->pkt_len, sdio_slave_ll_send_read_len(hal->host));

    send_set_state(hal, STATE_SENDING);
    HAL_EARLY_LOGD(TAG, "restart new send: %p->%p, pkt_len: %"PRIu32"", start_desc, end_desc, end_desc->pkt_len);
}

static esp_err_t send_check_new_packet(sdio_slave_context_t *hal)
{
    esp_err_t ret;
    sdio_slave_hal_send_desc_t *start = NULL;
    sdio_slave_hal_send_desc_t *end = NULL;
    if (hal->sending_mode == SDIO_SLAVE_SEND_PACKET) {
        ret = sdio_ringbuf_recv(&(hal->send_desc_queue), &start, &end, RINGBUF_GET_ONE);
    } else { //stream mode
        ret = sdio_ringbuf_recv(&(hal->send_desc_queue), &start, &end, RINGBUF_GET_ALL);
    }
    if (ret == ESP_OK) {
        hal->in_flight_head = start;
        hal->in_flight_end = end;
        end->dma_desc.eof = 1;
        //temporarily break the link ring here, the ring will be re-connected in ``send_isr_eof()``.
        hal->in_flight_next = SEND_DESC_NEXT(end);
        SEND_DESC_NEXT_SET(end, NULL);
    }
    return ESP_OK;
}

bool sdio_slave_hal_send_eof_happened(sdio_slave_context_t* hal)
{
    // Go to idle state (cur_start=NULL) if the transmission is done;
    // also update the sequence and recycle the descs.
    if (sdio_slave_ll_send_done(hal->slc)) {
        //check current state
        HAL_ASSERT(send_get_state(hal) == STATE_SENDING);
        sdio_slave_ll_send_intr_clr(hal->slc);
        return true;
    } else {
        return false;
    }
}

//clear the counter but keep the data
esp_err_t sdio_slave_hal_send_reset_counter(sdio_slave_context_t* hal)
{
    SDIO_SLAVE_CHECK(send_get_state(hal) == STATE_IDLE,
                     "reset counter when transmission started", ESP_ERR_INVALID_STATE);

    sdio_slave_ll_send_write_len(hal->slc, 0);
    HAL_EARLY_LOGV(TAG, "last_len: %08X", sdio_slave_ll_send_read_len(hal->host));

    hal->tail_pkt_len = 0;
    sdio_slave_hal_send_desc_t *desc = hal->in_flight_head;
    while (desc != NULL) {
        hal->tail_pkt_len += desc->dma_desc.length;
        desc->pkt_len = hal->tail_pkt_len;
        desc = SEND_DESC_NEXT(desc);
    }
    // In theory the next desc to walk is the one right after the last in-flight desc,
    // but the link of the last in-flight desc is NULL, so get it from the ringbuf directly.
    desc = (sdio_slave_hal_send_desc_t*)sdio_ringbuf_peek_front(&(hal->send_desc_queue));
    while (desc != NULL) {
        hal->tail_pkt_len += desc->dma_desc.length;
        desc->pkt_len = hal->tail_pkt_len;
        desc = SEND_DESC_NEXT(desc);
    }
    return ESP_OK;
}
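
/*
 * Illustrative example of the cumulative counter above (hypothetical numbers):
 * after a reset with three pending buffers of 12, 24 and 8 bytes, the
 * recomputed pkt_len values are 12, 36 and 44. The host reads the latest value
 * through the pkt_len register to learn how many bytes in total it may fetch.
 */
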
static esp_err_t send_get_inflight_desc(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_returned_cnt,
                                        bool init)
{
    esp_err_t ret;
    if (init) {
        HAL_ASSERT(hal->returned_desc == NULL);
        hal->returned_desc = hal->in_flight_head;
        send_set_state(hal, STATE_GETTING_RESULT);
    }

    if (hal->returned_desc != NULL) {
        *out_arg = hal->returned_desc->arg;
        hal->returned_desc = SEND_DESC_NEXT(hal->returned_desc);
        ret = ESP_OK;
    } else {
        if (hal->in_flight_head != NULL) {
            // fix the broken link of the last desc (cleared when it was sent)
            HAL_ASSERT(hal->in_flight_end != NULL);
            SEND_DESC_NEXT_SET(hal->in_flight_end, hal->in_flight_next);
            *out_returned_cnt = sdio_ringbuf_return(&(hal->send_desc_queue), (uint8_t*)hal->in_flight_head);
        }
        hal->in_flight_head = NULL;
        hal->in_flight_end = NULL;
        ret = ESP_ERR_NOT_FOUND;
    }
    return ret;
}

static esp_err_t send_get_unsent_desc(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_return_cnt)
{
    esp_err_t ret;
    sdio_slave_hal_send_desc_t *head = NULL;
    sdio_slave_hal_send_desc_t *tail = NULL;
    ret = sdio_ringbuf_recv(&(hal->send_desc_queue), &head, &tail, RINGBUF_GET_ONE);

    if (ret == ESP_OK) {
        //currently each packet takes only one desc.
        HAL_ASSERT(head == tail);
        (*out_arg) = head->arg;
        (*out_return_cnt) = sdio_ringbuf_return(&(hal->send_desc_queue), (uint8_t*) head);
    } else if (ret == ESP_ERR_NOT_FOUND) {
        // If in the wait-to-send state, set the tail length to the value last sent,
        // just as if the packets waiting to be sent had never been queued.
        // Go to idle state (cur_end != NULL and cur_start == NULL)
        send_set_state(hal, STATE_IDLE);
        hal->tail_pkt_len = sdio_slave_ll_send_read_len(hal->host);
    }
    return ret;
}

esp_err_t sdio_slave_hal_send_get_next_finished_arg(sdio_slave_context_t *hal, void **out_arg, uint32_t* out_returned_cnt)
{
    bool init = (send_get_state(hal) == STATE_SENDING);
    if (init) {
        HAL_ASSERT(hal->in_flight_head != NULL);
    } else {
        HAL_ASSERT(send_get_state(hal) == STATE_GETTING_RESULT);
    }
    *out_returned_cnt = 0;

    esp_err_t ret = send_get_inflight_desc(hal, out_arg, out_returned_cnt, init);

    if (ret == ESP_ERR_NOT_FOUND) {
        // Go to the wait-for-packet state
        send_set_state(hal, STATE_WAIT_FOR_START);
    }
    return ret;
}

esp_err_t sdio_slave_hal_send_flush_next_buffer(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_return_cnt)
{
    esp_err_t ret = ESP_OK;
    *out_return_cnt = 0;
    bool init = (send_get_state(hal) == STATE_IDLE);
    if (!init) {
        if (send_get_state(hal) != STATE_GETTING_RESULT && send_get_state(hal) != STATE_GETTING_UNSENT_DESC) {
            return ESP_ERR_INVALID_STATE;
        }
    }

    if (init || send_get_state(hal) == STATE_GETTING_RESULT) {
        ret = send_get_inflight_desc(hal, out_arg, out_return_cnt, init);
        if (ret == ESP_ERR_NOT_FOUND) {
            send_set_state(hal, STATE_GETTING_UNSENT_DESC);
        }
    }
    if (send_get_state(hal) == STATE_GETTING_UNSENT_DESC) {
        ret = send_get_unsent_desc(hal, out_arg, out_return_cnt);
        if (ret == ESP_ERR_NOT_FOUND) {
            send_set_state(hal, STATE_IDLE);
        }
    }
    return ret;
}

esp_err_t sdio_slave_hal_send_new_packet_if_exist(sdio_slave_context_t *hal)
{
    esp_err_t ret;
    // Go to the wait-sending state (cur_start != NULL && cur_end == NULL) if not sending and a new packet is ready.
    // Note we may also enter this state by stopping sending in the app.
    if (send_get_state(hal) == STATE_WAIT_FOR_START) {
        if (hal->in_flight_head == NULL) {
            send_check_new_packet(hal);
        }
        // Go to the sending state (cur_start and cur_end != NULL) if there is a packet to send.
        if (hal->in_flight_head) {
            send_new_packet(hal);
            ret = ESP_OK;
        } else {
            ret = ESP_ERR_NOT_FOUND;
        }
    } else {
        ret = ESP_ERR_INVALID_STATE;
    }
    return ret;
}

static esp_err_t send_write_desc(uint8_t* desc, void* arg)
{
    sdio_slave_hal_send_desc_t* next_desc = SEND_DESC_NEXT(desc);
    memcpy(desc, arg, sizeof(sdio_slave_hal_send_desc_t));
    SEND_DESC_NEXT_SET(desc, next_desc);
    return ESP_OK;
}

static void send_isr_invoke(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_part_done_intr_ena(hal->slc, true);
}

esp_err_t sdio_slave_hal_send_queue(sdio_slave_context_t* hal, uint8_t *addr, size_t len, void *arg)
{
    hal->tail_pkt_len += len;
    sdio_slave_hal_send_desc_t new_desc = {
        .dma_desc = {
            .size = len,
            .length = len,
            .buf = addr,
            .owner = 1,
            // in stream mode, the eof is only appended (in the ISR) when a new packet is ready to be sent
            .eof = (hal->sending_mode == SDIO_SLAVE_SEND_PACKET),
        },
        .arg = arg,
        .pkt_len = hal->tail_pkt_len,
    };

    esp_err_t ret = sdio_ringbuf_send(&(hal->send_desc_queue), send_write_desc, &new_desc);
    send_isr_invoke(hal);
    return ret;
}
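
/*
 * A minimal usage sketch (not part of the HAL), assuming ``hal``, ``addr``,
 * ``len`` and ``arg`` come from a hypothetical driver layer:
 */
#if 0 //illustrative sketch, not compiled
esp_err_t err = sdio_slave_hal_send_queue(hal, addr, len, arg);
if (err == ESP_OK) {
    // the descriptor now sits in the ring until the ISR links it onto the DMA;
    // ``arg`` comes back from sdio_slave_hal_send_get_next_finished_arg() once sent
}
#endif
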
/*---------------------------------------------------------------------------
 *                  Receive
 *--------------------------------------------------------------------------*/

static sdio_slave_ll_desc_t* recv_get_first_empty_buf(sdio_slave_context_t* hal)
{
    sdio_slave_hal_recv_stailq_t *const queue = &(hal->recv_link_list);
    sdio_slave_ll_desc_t *desc = STAILQ_FIRST(queue);
    while (desc && desc->owner == 0) {
        desc = STAILQ_NEXT(desc, qe);
    }
    return desc;
}

void sdio_slave_hal_recv_stop(sdio_slave_context_t* hal)
{
    sdio_slave_ll_set_ioready(hal->hinf, false); //set IO ready to 0 to stop the host from using the slave
    sdio_slave_ll_send_stop(hal->slc);
    sdio_slave_ll_recv_stop(hal->slc);
    sdio_slave_ll_recv_intr_ena(hal->slc, false);
}

//touches the linked list; calls should be protected by a spinlock
bool sdio_slave_hal_recv_has_next_item(sdio_slave_context_t* hal)
{
    if (hal->recv_cur_ret == NULL || hal->recv_cur_ret->owner != 0) return false;

    // This may cause the ``cur_ret`` pointer to become NULL, indicating the list is empty.
    // In this case ``tx_done`` will no longer happen until a new desc is appended.
    // The app is responsible for putting the pointer back in place when appending a new desc.
    hal->recv_cur_ret = STAILQ_NEXT(hal->recv_cur_ret, qe);
    return true;
}

bool sdio_slave_hal_recv_done(sdio_slave_context_t *hal)
{
    bool ret = sdio_slave_ll_recv_done(hal->slc);
    if (ret) {
        sdio_slave_ll_recv_done_clear(hal->slc);
    }
    return ret;
}
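
/*
 * A minimal sketch (not part of the HAL) of how a driver-level receive ISR
 * might combine the two predicates above; ``deliver_received_desc_to_app()``
 * and the spinlock handling are hypothetical and depend on the OS layer:
 */
#if 0 //illustrative sketch, not compiled
static void example_recv_isr(sdio_slave_context_t *hal)
{
    while (sdio_slave_hal_recv_done(hal)) {
        //walk all descriptors already filled by the hardware (owner bit cleared);
        //the recv spinlock should be held around this loop
        while (sdio_slave_hal_recv_has_next_item(hal)) {
            //e.g. wake a task that calls sdio_slave_hal_recv_unload_desc()
            deliver_received_desc_to_app(hal);
        }
    }
}
#endif
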
sdio_slave_ll_desc_t *sdio_slave_hal_recv_unload_desc(sdio_slave_context_t *hal)
{
    sdio_slave_hal_recv_stailq_t *const queue = &hal->recv_link_list;
    sdio_slave_ll_desc_t *desc = STAILQ_FIRST(queue);
    if (desc) {
        STAILQ_REMOVE_HEAD(queue, qe);
    }
    return desc;
}

void sdio_slave_hal_recv_init_desc(sdio_slave_context_t* hal, sdio_slave_ll_desc_t *desc, uint8_t *start)
{
    *desc = (sdio_slave_ll_desc_t) {
        .size = hal->recv_buffer_size,
        .buf = start,
    };
}

void sdio_slave_hal_recv_start(sdio_slave_context_t *hal)
{
    sdio_slave_ll_recv_reset(hal->slc);
    sdio_slave_ll_desc_t *desc = recv_get_first_empty_buf(hal);
    if (!desc) {
        HAL_LOGD(TAG, "recv: restart without desc");
    } else {
        //the counter is handled by the add/flush/reset functions
        sdio_slave_ll_recv_start(hal->slc, desc);
        sdio_slave_ll_recv_intr_ena(hal->slc, true);
    }
}

void sdio_slave_hal_recv_reset_counter(sdio_slave_context_t *hal)
{
    sdio_slave_ll_recv_size_reset(hal->slc);
    sdio_slave_ll_desc_t *desc = recv_get_first_empty_buf(hal);
    while (desc != NULL) {
        sdio_slave_ll_recv_size_inc(hal->slc);
        desc = STAILQ_NEXT(desc, qe);
    }
}

void sdio_slave_hal_recv_flush_one_buffer(sdio_slave_context_t *hal)
{
    sdio_slave_hal_recv_stailq_t *const queue = &hal->recv_link_list;
    sdio_slave_ll_desc_t *desc = STAILQ_FIRST(queue);
    assert(desc != NULL && desc->owner == 0);
    STAILQ_REMOVE_HEAD(queue, qe);
    desc->owner = 1;
    STAILQ_INSERT_TAIL(queue, desc, qe);
    sdio_slave_ll_recv_size_inc(hal->slc);
    //we only add it to the tail here, without starting the DMA or increasing the buffer num.
}

void sdio_slave_hal_load_buf(sdio_slave_context_t *hal, sdio_slave_ll_desc_t *desc)
{
    sdio_slave_hal_recv_stailq_t *const queue = &(hal->recv_link_list);
    desc->owner = 1;

    sdio_slave_ll_desc_t *const tail = STAILQ_LAST(queue, sdio_slave_ll_desc_s, qe);

    STAILQ_INSERT_TAIL(queue, desc, qe);
    if (hal->recv_cur_ret == NULL) {
        hal->recv_cur_ret = desc;
    }

    if (tail == NULL) {
        //nothing in the ll, start a new ll operation.
        sdio_slave_ll_recv_start(hal->slc, desc);
        sdio_slave_ll_recv_intr_ena(hal->slc, true);
        HAL_LOGV(TAG, "recv_load_buf: start new");
    } else {
        //restart the former ll operation
        sdio_slave_ll_recv_restart(hal->slc);
        HAL_LOGV(TAG, "recv_load_buf: restart");
    }
    sdio_slave_ll_recv_size_inc(hal->slc);
}
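
/*
 * A minimal sketch (not part of the HAL) of registering a fresh receive
 * buffer; ``dma_capable_desc`` and ``buffer_start`` are hypothetical names and
 * must point to DMA-capable memory:
 */
#if 0 //illustrative sketch, not compiled
sdio_slave_hal_recv_init_desc(hal, dma_capable_desc, buffer_start); //size taken from hal->recv_buffer_size
sdio_slave_hal_load_buf(hal, dma_capable_desc);                     //appends to the list and (re)starts the DMA
#endif
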
static inline void show_queue_item(sdio_slave_ll_desc_t *item)
{
    HAL_EARLY_LOGI(TAG, "=> %p: size: %"PRIu32"(%"PRIu32"), eof: %"PRIu32", owner: %"PRIu32"", item, item->size, item->length, item->eof, item->owner);
    HAL_EARLY_LOGI(TAG, "   buf: %p, stqe_next: %p", item->buf, item->qe.stqe_next);
}

static void __attribute((unused)) dump_queue(sdio_slave_hal_recv_stailq_t *queue)
{
    int cnt = 0;
    sdio_slave_ll_desc_t *item = NULL;
    HAL_EARLY_LOGI(TAG, ">>>>> first: %p, last: %p <<<<<", queue->stqh_first, queue->stqh_last);
    STAILQ_FOREACH(item, queue, qe) {
        cnt++;
        show_queue_item(item);
    }
    HAL_EARLY_LOGI(TAG, "total: %d", cnt);
}

/*---------------------------------------------------------------------------
 *                  Host
 *--------------------------------------------------------------------------*/

void sdio_slave_hal_hostint_get_ena(sdio_slave_context_t *hal, sdio_slave_hostint_t *out_int_mask)
{
    *out_int_mask = sdio_slave_ll_host_get_intena(hal->host);
}

void sdio_slave_hal_hostint_clear(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask)
{
    sdio_slave_ll_host_intr_clear(hal->host, mask); //clear the interrupts given by the mask
}

void sdio_slave_hal_hostint_set_ena(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask)
{
    sdio_slave_ll_host_set_intena(hal->host, mask);
}

void sdio_slave_hal_hostint_send(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask)
{
    sdio_slave_ll_host_send_int(hal->slc, mask);
}

uint8_t sdio_slave_hal_host_get_reg(sdio_slave_context_t *hal, int pos)
{
    return sdio_slave_ll_host_get_reg(hal->host, pos);
}

void sdio_slave_hal_host_set_reg(sdio_slave_context_t *hal, int pos, uint8_t reg)
{
    sdio_slave_ll_host_set_reg(hal->host, pos, reg);
}

void sdio_slave_hal_slvint_fetch_clear(sdio_slave_context_t *hal, sdio_slave_ll_slvint_t *out_int_mask)
{
    sdio_slave_ll_slvint_fetch_clear(hal->slc, out_int_mask);
}