// Copyright 2015-2019 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// The HAL layer for SDIO slave (common part)

#include <string.h>
#include "soc/slc_struct.h"
#include "soc/hinf_struct.h"
#include "hal/sdio_slave_types.h"
#include "soc/host_struct.h"
#include "hal/sdio_slave_hal.h"
#include "hal/assert.h"
#include "hal/log.h"
#include "esp_attr.h"

#define SDIO_SLAVE_CHECK(res, str, ret_val) do { if(!(res)){\
    HAL_LOGE(TAG, "%s", str);\
    return ret_val;\
} }while (0)

static const char TAG[] = "SDIO_HAL";
static esp_err_t init_send_queue(sdio_slave_context_t *hal);

/**************** Ring buffer for SDIO sending use *****************/
typedef enum {
    RINGBUF_GET_ONE = 0,
    RINGBUF_GET_ALL = 1,
} ringbuf_get_all_t;

typedef enum {
    RINGBUF_WRITE_PTR,
    RINGBUF_READ_PTR,
    RINGBUF_FREE_PTR,
} sdio_ringbuf_pointer_t;

static esp_err_t sdio_ringbuf_send(sdio_ringbuf_t *buf, esp_err_t (*copy_callback)(uint8_t *, void *), void *arg);
static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t *buf, uint8_t **start, uint8_t **end, ringbuf_get_all_t get_all);
static inline int sdio_ringbuf_return(sdio_ringbuf_t *buf, uint8_t *ptr);

#define _SEND_DESC_NEXT(x)    STAILQ_NEXT(&((sdio_slave_hal_send_desc_t*)x)->dma_desc, qe)
#define SEND_DESC_NEXT(x)     (sdio_slave_hal_send_desc_t*)_SEND_DESC_NEXT(x)
#define SEND_DESC_NEXT_SET(x, target)    do { \
        _SEND_DESC_NEXT(x) = (lldesc_t*)target; \
    }while(0)

static esp_err_t link_desc_to_last(uint8_t *desc, void *arg)
{
    SEND_DESC_NEXT_SET(arg, desc);
    return ESP_OK;
}
//calculate a pointer offset from one of the given ring buffer's pointers, wrapping around the buffer end
static inline uint8_t *sdio_ringbuf_offset_ptr(sdio_ringbuf_t *buf, sdio_ringbuf_pointer_t ptr, uint32_t offset)
{
    uint8_t *buf_ptr;
    switch (ptr) {
    case RINGBUF_WRITE_PTR:
        buf_ptr = buf->write_ptr;
        break;
    case RINGBUF_READ_PTR:
        buf_ptr = buf->read_ptr;
        break;
    case RINGBUF_FREE_PTR:
        buf_ptr = buf->free_ptr;
        break;
    default:
        abort();
    }

    uint8_t *offset_ptr = buf_ptr + offset;
    if (offset_ptr >= buf->data + buf->size) {
        offset_ptr -= buf->size;
    }
    return offset_ptr;
}
static esp_err_t sdio_ringbuf_send(sdio_ringbuf_t *buf, esp_err_t (*copy_callback)(uint8_t *, void *), void *arg)
{
    uint8_t *get_ptr = sdio_ringbuf_offset_ptr(buf, RINGBUF_WRITE_PTR, SDIO_SLAVE_SEND_DESC_SIZE);
    esp_err_t err = ESP_OK;
    if (copy_callback) {
        err = (*copy_callback)(get_ptr, arg);
    }
    if (err != ESP_OK) return err;

    buf->write_ptr = get_ptr;
    return ESP_OK;
}
// This ring buffer uses a return-before-recv-again strategy: an item received from
// the buffer must be returned before the next recv. Since it is designed to be
// called in the ISR, there is no protection against parallel access.
// (See the usage sketch at the end of this section.)
static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t *buf, uint8_t **start, uint8_t **end, ringbuf_get_all_t get_all)
{
    HAL_ASSERT(buf->free_ptr == buf->read_ptr); //must return before recv again
    if (start == NULL && end == NULL) return ESP_ERR_INVALID_ARG; // must have an output
    if (buf->read_ptr == buf->write_ptr) return ESP_ERR_NOT_FOUND; // no data

    uint8_t *get_start = sdio_ringbuf_offset_ptr(buf, RINGBUF_READ_PTR, SDIO_SLAVE_SEND_DESC_SIZE);

    if (get_all != RINGBUF_GET_ONE) {
        buf->read_ptr = buf->write_ptr;
    } else {
        buf->read_ptr = get_start;
    }

    if (start != NULL) {
        *start = get_start;
    }
    if (end != NULL) {
        *end = buf->read_ptr;
    }
    return ESP_OK;
}
static inline int sdio_ringbuf_return(sdio_ringbuf_t *buf, uint8_t *ptr)
{
    HAL_ASSERT(sdio_ringbuf_offset_ptr(buf, RINGBUF_FREE_PTR, SDIO_SLAVE_SEND_DESC_SIZE) == ptr);
    size_t size = (buf->read_ptr + buf->size - buf->free_ptr) % buf->size;
    size_t count = size / SDIO_SLAVE_SEND_DESC_SIZE;
    HAL_ASSERT(count * SDIO_SLAVE_SEND_DESC_SIZE == size);

    buf->free_ptr = buf->read_ptr;
    return count;
}

static inline uint8_t *sdio_ringbuf_peek_front(sdio_ringbuf_t *buf)
{
    if (buf->read_ptr != buf->write_ptr) {
        return sdio_ringbuf_offset_ptr(buf, RINGBUF_READ_PTR, SDIO_SLAVE_SEND_DESC_SIZE);
    } else {
        return NULL;
    }
}

static inline uint8_t *sdio_ringbuf_peek_rear(sdio_ringbuf_t *buf)
{
    return buf->write_ptr;
}

static inline bool sdio_ringbuf_empty(sdio_ringbuf_t *buf)
{
    return (buf->read_ptr == buf->write_ptr);
}
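
/* Usage sketch (illustrative only, not compiled): the intended recv/return
 * cycle of this ring buffer, assuming ``buf`` has already been initialized as
 * in ``init_send_queue()`` below. The variable names are hypothetical.
 *
 *     uint8_t *start = NULL;
 *     if (sdio_ringbuf_recv(buf, &start, NULL, RINGBUF_GET_ONE) == ESP_OK) {
 *         // ... use the descriptor slot at ``start`` ...
 *         int freed = sdio_ringbuf_return(buf, start); // must return before the next recv
 *         (void) freed;
 *     }
 */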
/**************** End of Ring buffer *****************/

void sdio_slave_hal_init(sdio_slave_context_t *hal)
{
    hal->host = sdio_slave_ll_get_host(0);
    hal->slc = sdio_slave_ll_get_slc(0);
    hal->hinf = sdio_slave_ll_get_hinf(0);
    hal->send_state = STATE_IDLE;
    hal->recv_link_list = (sdio_slave_hal_recv_stailq_t)STAILQ_HEAD_INITIALIZER(hal->recv_link_list);

    init_send_queue(hal);
}

void sdio_slave_hal_hw_init(sdio_slave_context_t *hal)
{
    sdio_slave_ll_init(hal->slc);
    sdio_slave_ll_enable_hs(hal->hinf, true);
    sdio_slave_ll_set_timing(hal->host, hal->timing);
    sdio_slave_ll_slvint_t intr_ena = 0xff;
    sdio_slave_ll_slvint_set_ena(hal->slc, &intr_ena);
}
static esp_err_t init_send_queue(sdio_slave_context_t *hal)
{
    esp_err_t ret;
    esp_err_t rcv_res __attribute((unused));
    sdio_ringbuf_t *buf = &(hal->send_desc_queue);

    //initialize pointers
    buf->write_ptr = buf->data;
    buf->read_ptr = buf->data;
    buf->free_ptr = buf->data;

    sdio_slave_hal_send_desc_t *first = NULL, *last = NULL;
    //no copy for the first descriptor
    ret = sdio_ringbuf_send(buf, NULL, NULL);
    if (ret != ESP_OK) return ret;

    //loop through the ringbuf to link all the descriptors one after another into a ring
    for (int i = 0; i < hal->send_queue_size + 1; i++) {
        rcv_res = sdio_ringbuf_recv(buf, (uint8_t **) &last, NULL, RINGBUF_GET_ONE);
        assert (rcv_res == ESP_OK);
        ret = sdio_ringbuf_send(buf, link_desc_to_last, last);
        if (ret != ESP_OK) return ret;
        sdio_ringbuf_return(buf, (uint8_t *) last);
    }

    first = NULL;
    last = NULL;
    //clear the queue
    rcv_res = sdio_ringbuf_recv(buf, (uint8_t **) &first, (uint8_t **) &last, RINGBUF_GET_ALL);
    assert (rcv_res == ESP_OK);
    HAL_ASSERT(first == last); //there should be only one desc remaining
    sdio_ringbuf_return(buf, (uint8_t *) first);

    return ESP_OK;
}

void sdio_slave_hal_set_ioready(sdio_slave_context_t *hal, bool ready)
{
    sdio_slave_ll_set_ioready(hal->hinf, ready);   //set IO ready to 1 to allow the host to use the slave
}
/*---------------------------------------------------------------------------
 *                  Send
 *
 *  The hardware has a cache, so once a descriptor is loaded onto the linked list, it cannot be modified
 *  until it is returned (used) by the hardware. This forbids us from loading descriptors onto the linked list during
 *  a transfer (or while waiting for the host to start one). However, we solve this with a "ringbuffer" (different
 *  from the one in the ``freertos/`` folder) holding the descriptors:
 *
 *  1. The driver allocates continuous memory for several buffer descriptors (the maximum buffer number) during
 *     initialization. The driver then points the STAILQ_NEXT pointer of every descriptor except the last one to the
 *     descriptor after it, and points the pointer of the last descriptor back to the first one: the descriptors now
 *     form a ring.
 *  2. The "ringbuffer" has a write pointer pointing to where the app can write a new descriptor. The app writes the
 *     new descriptor indicated by the write pointer without touching the STAILQ_NEXT pointer, so that the
 *     descriptors always stay in a ring-like linked list. The app never touches the part of the linked list being
 *     used by the hardware.
 *  3. When the hardware needs some data to send, it automatically picks a part of the linked descriptors. According
 *     to the mode:
 *     - Buffer mode: only picks the descriptor next to the last one sent;
 *     - Stream mode: picks the whole unsent linked list, starting from the descriptor above, up to the latest
 *       linked one.
 *     The driver clears the STAILQ_NEXT pointer of the last descriptor and hands the head of this part to the DMA
 *     controller, so that it looks like a plain linear linked list rather than a ring to the hardware.
 *  4. The counter of the sending FIFO increases when the app loads new buffers (in STREAM_MODE) or when a new
 *     transfer should start (in PACKET_MODE).
 *  5. When the sending transfer is finished, the driver walks through the just-sent descriptors in the ISR and
 *     pushes the ``arg`` member of each back to the app through the queue, so that the app can handle the finished
 *     buffers. The driver also fixes the STAILQ_NEXT pointer of the last descriptor so that the descriptors form a
 *     ring again.
 *
 *  A sketch of the resulting call flow follows this comment block.
 ----------------------------------------------------------------------------*/
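/* Minimal call-flow sketch (illustrative only, not compiled). The functions
 * below are the ones defined in this file; the surrounding task/ISR split is
 * assumed driver code:
 *
 *     // app task: step 2, write one descriptor into the ring
 *     sdio_slave_hal_send_queue(hal, data, len, arg);
 *
 *     // ISR (invoked via the rx_done trick below): step 3, load the unsent
 *     // part of the ring onto the DMA if the hardware is waiting
 *     sdio_slave_hal_send_new_packet_if_exist(hal);
 *
 *     // ISR: step 5, after EOF return the finished buffers to the app
 *     if (sdio_slave_hal_send_eof_happened(hal)) {
 *         void *finished_arg;
 *         uint32_t returned_cnt;
 *         while (sdio_slave_hal_send_get_next_finished_arg(hal, &finished_arg, &returned_cnt) == ESP_OK) {
 *             // ... hand finished_arg back to the app ...
 *         }
 *     }
 */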
static inline void send_set_state(sdio_slave_context_t *hal, send_state_t state)
{
    hal->send_state = state;
}

static inline send_state_t send_get_state(sdio_slave_context_t *hal)
{
    return hal->send_state;
}

DMA_ATTR static const lldesc_t start_desc = {
    .owner = 1,
    .buf = (void*)0x3ffbbbbb, //assign a DMA-capable pointer other than NULL, which will never be used
    .size = 1,
    .length = 1,
    .eof = 1,
};

//force-trigger the rx_done interrupt. This interrupt is abused to invoke the ISR from the app
//through its enable bit; the raw status bit is set here once and never cleared.
static void send_isr_invoker_enable(const sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_reset(hal->slc);
    sdio_slave_ll_send_start(hal->slc, &start_desc);
    //wait for rx_done
    while (!sdio_slave_ll_send_invoker_ready(hal->slc));
    sdio_slave_ll_send_stop(hal->slc);
    sdio_slave_ll_send_hostint_clr(hal->host);
}

static void send_isr_invoker_disable(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_part_done_clear(hal->slc);
}

void sdio_slave_hal_send_handle_isr_invoke(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_part_done_intr_ena(hal->slc, false);
}
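
/* How the invoke mechanism works (a summary of the code above, for the
 * reader's convenience): ``send_isr_invoker_enable()`` runs a one-byte dummy
 * transfer so that the raw rx_done status bit is set exactly once and then
 * left uncleared. Later, ``send_isr_invoke()`` (called from
 * ``sdio_slave_hal_send_queue()``) only has to set the rx_done enable bit to
 * make the ISR fire immediately; the ISR then calls
 * ``sdio_slave_hal_send_handle_isr_invoke()`` to disable it again. */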
//start the hardware operation with existing data (if any)
esp_err_t sdio_slave_hal_send_start(sdio_slave_context_t *hal)
{
    SDIO_SLAVE_CHECK(send_get_state(hal) == STATE_IDLE,
                     "already started", ESP_ERR_INVALID_STATE);
    send_set_state(hal, STATE_WAIT_FOR_START);
    send_isr_invoker_enable(hal);
    sdio_slave_ll_send_intr_clr(hal->slc);
    sdio_slave_ll_send_intr_ena(hal->slc, true);
    return ESP_OK;
}

//only stops the hardware operations; neither the data nor the counter is touched
void sdio_slave_hal_send_stop(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_stop(hal->slc);
    send_isr_invoker_disable(hal);
    sdio_slave_ll_send_intr_ena(hal->slc, false);
    send_set_state(hal, STATE_IDLE);
}
static void send_new_packet(sdio_slave_context_t *hal)
{
    // since the eof bit has changed, we have to stop and reset the linked list,
    // and restart the new linked-list operation
    sdio_slave_hal_send_desc_t *const start_desc = hal->in_flight_head;
    sdio_slave_hal_send_desc_t *const end_desc = hal->in_flight_end;
    HAL_ASSERT(start_desc != NULL && end_desc != NULL);

    sdio_slave_ll_send_stop(hal->slc);
    sdio_slave_ll_send_reset(hal->slc);
    sdio_slave_ll_send_start(hal->slc, (lldesc_t*)start_desc);

    // update the pkt_len register to allow the host to read.
    sdio_slave_ll_send_write_len(hal->slc, end_desc->pkt_len);
    HAL_EARLY_LOGV(TAG, "send_length_write: %d, last_len: %08X", end_desc->pkt_len, sdio_slave_ll_send_read_len(hal->host));

    send_set_state(hal, STATE_SENDING);
    HAL_EARLY_LOGD(TAG, "restart new send: %p->%p, pkt_len: %d", start_desc, end_desc, end_desc->pkt_len);
}

static esp_err_t send_check_new_packet(sdio_slave_context_t *hal)
{
    esp_err_t ret;
    sdio_slave_hal_send_desc_t *start = NULL;
    sdio_slave_hal_send_desc_t *end = NULL;
    if (hal->sending_mode == SDIO_SLAVE_SEND_PACKET) {
        ret = sdio_ringbuf_recv(&(hal->send_desc_queue), (uint8_t **) &start, (uint8_t **) &end, RINGBUF_GET_ONE);
    } else { //stream mode
        ret = sdio_ringbuf_recv(&(hal->send_desc_queue), (uint8_t **) &start, (uint8_t **) &end, RINGBUF_GET_ALL);
    }
    if (ret == ESP_OK) {
        hal->in_flight_head = start;
        hal->in_flight_end = end;
        end->dma_desc.eof = 1;
        //temporarily break the link ring here; the ring will be re-connected in ``send_isr_eof()``.
        hal->in_flight_next = SEND_DESC_NEXT(end);
        SEND_DESC_NEXT_SET(end, NULL);
    }
    return ESP_OK;
}
bool sdio_slave_hal_send_eof_happened(sdio_slave_context_t *hal)
{
    // Go to idle state (cur_start=NULL) if the transmission is done,
    // also update the sequence and recycle the descs.
    if (sdio_slave_ll_send_done(hal->slc)) {
        //check the current state
        HAL_ASSERT(send_get_state(hal) == STATE_SENDING);
        sdio_slave_ll_send_intr_clr(hal->slc);
        return true;
    } else {
        return false;
    }
}

//clear the counter but keep the data
esp_err_t sdio_slave_hal_send_reset_counter(sdio_slave_context_t *hal)
{
    SDIO_SLAVE_CHECK(send_get_state(hal) == STATE_IDLE,
                     "reset counter when transmission started", ESP_ERR_INVALID_STATE);

    sdio_slave_ll_send_write_len(hal->slc, 0);
    HAL_EARLY_LOGV(TAG, "last_len: %08X", sdio_slave_ll_send_read_len(hal->host));

    hal->tail_pkt_len = 0;
    sdio_slave_hal_send_desc_t *desc = hal->in_flight_head;
    while (desc != NULL) {
        hal->tail_pkt_len += desc->dma_desc.length;
        desc->pkt_len = hal->tail_pkt_len;
        desc = SEND_DESC_NEXT(desc);
    }
    // in theory the next desc to walk should be the one right after the last in-flight one,
    // but the link of the last in-flight desc is NULL, so get the desc from the ringbuf directly.
    desc = (sdio_slave_hal_send_desc_t *)sdio_ringbuf_peek_front(&(hal->send_desc_queue));
    while (desc != NULL) {
        hal->tail_pkt_len += desc->dma_desc.length;
        desc->pkt_len = hal->tail_pkt_len;
        desc = SEND_DESC_NEXT(desc);
    }

    return ESP_OK;
}
static esp_err_t send_get_inflight_desc(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_returned_cnt,
                                        bool init)
{
    esp_err_t ret;
    if (init) {
        HAL_ASSERT(hal->returned_desc == NULL);
        hal->returned_desc = hal->in_flight_head;
        send_set_state(hal, STATE_GETTING_RESULT);
    }

    if (hal->returned_desc != NULL) {
        *out_arg = hal->returned_desc->arg;
        hal->returned_desc = SEND_DESC_NEXT(hal->returned_desc);
        ret = ESP_OK;
    } else {
        if (hal->in_flight_head != NULL) {
            // fix the broken link of the last desc (it was broken when the packet was sent)
            HAL_ASSERT(hal->in_flight_end != NULL);
            SEND_DESC_NEXT_SET(hal->in_flight_end, hal->in_flight_next);
            *out_returned_cnt = sdio_ringbuf_return(&(hal->send_desc_queue), (uint8_t *)hal->in_flight_head);
        }
        hal->in_flight_head = NULL;
        hal->in_flight_end = NULL;
        ret = ESP_ERR_NOT_FOUND;
    }
    return ret;
}
static esp_err_t send_get_unsent_desc(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_return_cnt)
{
    esp_err_t ret;
    sdio_slave_hal_send_desc_t *head, *tail;
    ret = sdio_ringbuf_recv(&(hal->send_desc_queue), (uint8_t **) &head, (uint8_t **) &tail, RINGBUF_GET_ONE);
    if (ret == ESP_OK) {
        //currently each packet takes only one desc.
        HAL_ASSERT(head == tail);
        (*out_arg) = head->arg;
        (*out_return_cnt) = sdio_ringbuf_return(&(hal->send_desc_queue), (uint8_t *) head);
    } else if (ret == ESP_ERR_NOT_FOUND) {
        // if in the wait-to-send state, set the tail sequence number to the value last sent,
        // just as if the packets waiting to be sent had never been queued.
        // Go to idle state (cur_end!=NULL and cur_start=NULL)
        send_set_state(hal, STATE_IDLE);
        hal->tail_pkt_len = sdio_slave_ll_send_read_len(hal->host);
    }
    return ret;
}
esp_err_t sdio_slave_hal_send_get_next_finished_arg(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_returned_cnt)
{
    bool init = (send_get_state(hal) == STATE_SENDING);
    if (init) {
        HAL_ASSERT(hal->in_flight_head != NULL);
    } else {
        HAL_ASSERT(send_get_state(hal) == STATE_GETTING_RESULT);
    }
    *out_returned_cnt = 0;

    esp_err_t ret = send_get_inflight_desc(hal, out_arg, out_returned_cnt, init);
    if (ret == ESP_ERR_NOT_FOUND) {
        // Go to the wait-for-packet state
        send_set_state(hal, STATE_WAIT_FOR_START);
    }
    return ret;
}
esp_err_t sdio_slave_hal_send_flush_next_buffer(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_return_cnt)
{
    esp_err_t ret = ESP_OK;
    *out_return_cnt = 0;
    bool init = (send_get_state(hal) == STATE_IDLE);
    if (!init) {
        if (send_get_state(hal) != STATE_GETTING_RESULT && send_get_state(hal) != STATE_GETTING_UNSENT_DESC) {
            return ESP_ERR_INVALID_STATE;
        }
    }

    if (init || send_get_state(hal) == STATE_GETTING_RESULT) {
        ret = send_get_inflight_desc(hal, out_arg, out_return_cnt, init);
        if (ret == ESP_ERR_NOT_FOUND) {
            send_set_state(hal, STATE_GETTING_UNSENT_DESC);
        }
    }
    if (send_get_state(hal) == STATE_GETTING_UNSENT_DESC) {
        ret = send_get_unsent_desc(hal, out_arg, out_return_cnt);
        if (ret == ESP_ERR_NOT_FOUND) {
            send_set_state(hal, STATE_IDLE);
        }
    }
    return ret;
}

esp_err_t sdio_slave_hal_send_new_packet_if_exist(sdio_slave_context_t *hal)
{
    esp_err_t ret;
    // Go to the wait-for-sending state (cur_start!=NULL && cur_end==NULL) if not sending and a new packet is ready.
    // Note we may also enter this state when the app stops sending.
    if (send_get_state(hal) == STATE_WAIT_FOR_START) {
        if (hal->in_flight_head == NULL) {
            send_check_new_packet(hal);
        }
        // Go to the sending state (cur_start and cur_end != NULL) if there is a packet to send.
        if (hal->in_flight_head) {
            send_new_packet(hal);
            ret = ESP_OK;
        } else {
            ret = ESP_ERR_NOT_FOUND;
        }
    } else {
        ret = ESP_ERR_INVALID_STATE;
    }
    return ret;
}
static esp_err_t send_write_desc(uint8_t *desc, void *arg)
{
    // preserve the STAILQ_NEXT pointer of the slot while copying the new desc in
    sdio_slave_hal_send_desc_t *next_desc = SEND_DESC_NEXT(desc);
    memcpy(desc, arg, sizeof(sdio_slave_hal_send_desc_t));
    SEND_DESC_NEXT_SET(desc, next_desc);
    return ESP_OK;
}

static void send_isr_invoke(sdio_slave_context_t *hal)
{
    sdio_slave_ll_send_part_done_intr_ena(hal->slc, true);
}

esp_err_t sdio_slave_hal_send_queue(sdio_slave_context_t *hal, uint8_t *addr, size_t len, void *arg)
{
    hal->tail_pkt_len += len;
    sdio_slave_hal_send_desc_t new_desc = {
        .dma_desc = {
            .size = len,
            .length = len,
            .buf = addr,
            .owner = 1,
            // in stream mode, the eof bit is only appended (in the ISR) when a new packet is ready to be sent
            .eof = (hal->sending_mode == SDIO_SLAVE_SEND_PACKET),
        },
        .arg = arg,
        .pkt_len = hal->tail_pkt_len,
    };

    esp_err_t ret = sdio_ringbuf_send(&(hal->send_desc_queue), send_write_desc, &new_desc);
    send_isr_invoke(hal);
    return ret;
}
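
/* Worked example (hypothetical sizes): queueing three buffers of 12, 20 and 8
 * bytes gives the descriptors cumulative ``pkt_len`` values 12, 32 and 40.
 * ``send_new_packet()`` writes the ``pkt_len`` of the in-flight tail to the
 * length register, so the host always sees the total number of bytes it may
 * read since the counter was last reset. */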
/*---------------------------------------------------------------------------
 *                  Receive
 *--------------------------------------------------------------------------*/
static lldesc_t *recv_get_first_empty_buf(sdio_slave_context_t *hal)
{
    sdio_slave_hal_recv_stailq_t *const queue = &(hal->recv_link_list);
    lldesc_t *desc = STAILQ_FIRST(queue);
    while (desc && desc->owner == 0) {
        desc = STAILQ_NEXT(desc, qe);
    }
    return desc;
}

void sdio_slave_hal_recv_stop(sdio_slave_context_t *hal)
{
    sdio_slave_ll_set_ioready(hal->hinf, false); //set IO ready to 0 to stop the host from using the slave
    sdio_slave_ll_send_stop(hal->slc);
    sdio_slave_ll_recv_stop(hal->slc);
    sdio_slave_ll_recv_intr_ena(hal->slc, false);
}

//touches the linked list; should be protected by a spinlock
bool sdio_slave_hal_recv_has_next_item(sdio_slave_context_t *hal)
{
    if (hal->recv_cur_ret == NULL || hal->recv_cur_ret->owner != 0) return false;

    // This may cause the ``cur_ret`` pointer to become NULL, indicating the list is empty;
    // in this case ``tx_done`` will not happen again until a new desc is appended.
    // The app is responsible for putting the pointer back to the right place when appending a new desc.
    hal->recv_cur_ret = STAILQ_NEXT(hal->recv_cur_ret, qe);
    return true;
}
bool sdio_slave_hal_recv_done(sdio_slave_context_t *hal)
{
    bool ret = sdio_slave_ll_recv_done(hal->slc);
    if (ret) {
        sdio_slave_ll_recv_done_clear(hal->slc);
    }
    return ret;
}

lldesc_t *sdio_slave_hal_recv_unload_desc(sdio_slave_context_t *hal)
{
    sdio_slave_hal_recv_stailq_t *const queue = &hal->recv_link_list;
    lldesc_t *desc = STAILQ_FIRST(queue);
    if (desc) {
        STAILQ_REMOVE_HEAD(queue, qe);
    }
    return desc;
}

void sdio_slave_hal_recv_init_desc(sdio_slave_context_t *hal, lldesc_t *desc, uint8_t *start)
{
    *desc = (lldesc_t) {
        .size = hal->recv_buffer_size,
        .buf = start,
    };
}

void sdio_slave_hal_recv_start(sdio_slave_context_t *hal)
{
    sdio_slave_ll_recv_reset(hal->slc);
    lldesc_t *desc = recv_get_first_empty_buf(hal);
    if (!desc) {
        HAL_LOGD(TAG, "recv: restart without desc");
    } else {
        //the counter is handled by the add/flush/reset functions
        sdio_slave_ll_recv_start(hal->slc, desc);
        sdio_slave_ll_recv_intr_ena(hal->slc, true);
    }
}

void sdio_slave_hal_recv_reset_counter(sdio_slave_context_t *hal)
{
    sdio_slave_ll_recv_size_reset(hal->slc);
    lldesc_t *desc = recv_get_first_empty_buf(hal);
    while (desc != NULL) {
        sdio_slave_ll_recv_size_inc(hal->slc);
        desc = STAILQ_NEXT(desc, qe);
    }
}
void sdio_slave_hal_recv_flush_one_buffer(sdio_slave_context_t *hal)
{
    sdio_slave_hal_recv_stailq_t *const queue = &hal->recv_link_list;
    lldesc_t *desc = STAILQ_FIRST(queue);
    assert (desc != NULL && desc->owner == 0);
    STAILQ_REMOVE_HEAD(queue, qe);
    desc->owner = 1;
    STAILQ_INSERT_TAIL(queue, desc, qe);
    sdio_slave_ll_recv_size_inc(hal->slc);
    //we only add it to the tail here, without starting the DMA or increasing the buffer number.
}

void sdio_slave_hal_load_buf(sdio_slave_context_t *hal, lldesc_t *desc)
{
    sdio_slave_hal_recv_stailq_t *const queue = &(hal->recv_link_list);
    desc->owner = 1;

    lldesc_t *const tail = STAILQ_LAST(queue, lldesc_s, qe);

    STAILQ_INSERT_TAIL(queue, desc, qe);
    if (hal->recv_cur_ret == NULL) {
        hal->recv_cur_ret = desc;
    }

    if (tail == NULL) {
        //nothing in the linked list yet; start a new linked-list operation.
        sdio_slave_ll_recv_start(hal->slc, desc);
        sdio_slave_ll_recv_intr_ena(hal->slc, true);
        HAL_LOGV(TAG, "recv_load_buf: start new");
    } else {
        //restart the former linked-list operation
        sdio_slave_ll_recv_restart(hal->slc);
        HAL_LOGV(TAG, "recv_load_buf: restart");
    }
    sdio_slave_ll_recv_size_inc(hal->slc);
}
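
/* Receive-path sketch (illustrative only, not compiled): a hypothetical
 * driver feeds buffers to the hardware and collects filled ones roughly as
 * follows; ``desc`` and ``buffer`` are assumptions, the functions are real:
 *
 *     sdio_slave_hal_recv_init_desc(hal, desc, buffer);  // wrap a raw buffer
 *     sdio_slave_hal_load_buf(hal, desc);                // hand it to the DMA
 *
 *     // in the ISR, after sdio_slave_hal_recv_done() reports true:
 *     while (sdio_slave_hal_recv_has_next_item(hal)) {
 *         // one more buffer has been filled by the host; notify the app,
 *         // which can later take it with sdio_slave_hal_recv_unload_desc()
 *     }
 */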
static inline void show_queue_item(lldesc_t *item)
{
    HAL_EARLY_LOGI(TAG, "=> %p: size: %d(%d), eof: %d, owner: %d", item, item->size, item->length, item->eof, item->owner);
    HAL_EARLY_LOGI(TAG, "   buf: %p, stqe_next: %p", item->buf, item->qe.stqe_next);
}

static void __attribute((unused)) dump_queue(sdio_slave_hal_recv_stailq_t *queue)
{
    int cnt = 0;
    lldesc_t *item = NULL;
    HAL_EARLY_LOGI(TAG, ">>>>> first: %p, last: %p <<<<<", queue->stqh_first, queue->stqh_last);
    STAILQ_FOREACH(item, queue, qe) {
        cnt++;
        show_queue_item(item);
    }
    HAL_EARLY_LOGI(TAG, "total: %d", cnt);
}
/*---------------------------------------------------------------------------
 *                  Host
 *--------------------------------------------------------------------------*/
void sdio_slave_hal_hostint_get_ena(sdio_slave_context_t *hal, sdio_slave_hostint_t *out_int_mask)
{
    *out_int_mask = sdio_slave_ll_host_get_intena(hal->host);
}

void sdio_slave_hal_hostint_clear(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask)
{
    sdio_slave_ll_host_intr_clear(hal->host, mask); //clear the interrupts given by the mask
}

void sdio_slave_hal_hostint_set_ena(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask)
{
    sdio_slave_ll_host_set_intena(hal->host, mask);
}

void sdio_slave_hal_hostint_send(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask)
{
    sdio_slave_ll_host_send_int(hal->slc, mask);
}

uint8_t sdio_slave_hal_host_get_reg(sdio_slave_context_t *hal, int pos)
{
    return sdio_slave_ll_host_get_reg(hal->host, pos);
}

void sdio_slave_hal_host_set_reg(sdio_slave_context_t *hal, int pos, uint8_t reg)
{
    sdio_slave_ll_host_set_reg(hal->host, pos, reg);
}

void sdio_slave_hal_slvint_fetch_clear(sdio_slave_context_t *hal, sdio_slave_ll_slvint_t *out_int_mask)
{
    sdio_slave_ll_slvint_fetch_clear(hal->slc, out_int_mask);
}