sdmmc_transaction.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489
  1. // Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #include <string.h>
  15. #include "esp_err.h"
  16. #include "esp_log.h"
  17. #include "esp_pm.h"
  18. #include "freertos/FreeRTOS.h"
  19. #include "freertos/queue.h"
  20. #include "freertos/semphr.h"
  21. #include "freertos/task.h"
  22. #include "soc/sdmmc_periph.h"
  23. #include "soc/soc_memory_layout.h"
  24. #include "driver/sdmmc_types.h"
  25. #include "driver/sdmmc_defs.h"
  26. #include "driver/sdmmc_host.h"
  27. #include "sdmmc_private.h"
/* Number of DMA descriptors used for transfer.
 * Increasing this value above 4 doesn't improve performance for the usual case
 * of SD memory cards (most data transfers are multiples of 512 bytes).
 */
#define SDMMC_DMA_DESC_CNT 4

/* Log tag used by this translation unit */
static const char* TAG = "sdmmc_req";
/* States of the transaction state machine driven by process_events(). */
typedef enum {
    SDMMC_IDLE,          // no transaction in progress
    SDMMC_SENDING_CMD,   // command written to the host, waiting for CMD_DONE or a command error
    SDMMC_SENDING_DATA,  // data phase in progress, DMA descriptors being consumed
    SDMMC_BUSY,          // all descriptors consumed, waiting for DATA_OVER
} sdmmc_req_state_t;
/* Book-keeping for the data buffer currently being fed to the DMA engine. */
typedef struct {
    uint8_t* ptr;            // next byte of the caller's buffer to be queued into a descriptor
    size_t size_remaining;   // bytes of the buffer not yet placed into a descriptor
    size_t next_desc;        // index of the next descriptor to fill (ring of SDMMC_DMA_DESC_CNT)
    size_t desc_remaining;   // descriptors still to be consumed by the DMA engine
} sdmmc_transfer_state_t;
/* Interrupt status bits which indicate an error in the data phase */
const uint32_t SDMMC_DATA_ERR_MASK =
        SDMMC_INTMASK_DTO | SDMMC_INTMASK_DCRC |
        SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE |
        SDMMC_INTMASK_EBE;

/* IDMAC status bits which indicate completion of a descriptor (receive/transmit/normal) */
const uint32_t SDMMC_DMA_DONE_MASK =
        SDMMC_IDMAC_INTMASK_RI | SDMMC_IDMAC_INTMASK_TI |
        SDMMC_IDMAC_INTMASK_NI;

/* Interrupt status bits which indicate an error in the command phase */
const uint32_t SDMMC_CMD_ERR_MASK =
        SDMMC_INTMASK_RTO |
        SDMMC_INTMASK_RCRC |
        SDMMC_INTMASK_RESP_ERR;
/* DMA descriptor ring shared with the IDMAC engine */
static sdmmc_desc_t s_dma_desc[SDMMC_DMA_DESC_CNT];
/* State of the transfer currently in progress; valid while s_request_mutex is held */
static sdmmc_transfer_state_t s_cur_transfer = { 0 };
/* Serializes transactions: only one command may be in flight at a time */
static QueueHandle_t s_request_mutex;
static bool s_is_app_cmd; // This flag is set if the next command is an APP command
#ifdef CONFIG_PM_ENABLE
/* Held during a transaction to keep the APB frequency fixed */
static esp_pm_lock_handle_t s_pm_lock;
#endif
/* Forward declarations of the static helpers defined below */
static esp_err_t handle_idle_state_events(void);
static sdmmc_hw_cmd_t make_hw_cmd(sdmmc_command_t* cmd);
static esp_err_t handle_event(sdmmc_command_t* cmd, sdmmc_req_state_t* state,
        sdmmc_event_t* unhandled_events);
static esp_err_t process_events(sdmmc_event_t evt, sdmmc_command_t* cmd,
        sdmmc_req_state_t* pstate, sdmmc_event_t* unhandled_events);
static void process_command_response(uint32_t status, sdmmc_command_t* cmd);
static void fill_dma_descriptors(size_t num_desc);
static size_t get_free_descriptors_count(void);
static bool wait_for_busy_cleared(int timeout_ms);
/**
 * Allocate the resources used by the transaction handler: the request mutex
 * and (when power management is enabled) the APB frequency lock.
 *
 * Must be called exactly once before sdmmc_host_do_transaction().
 *
 * @return ESP_OK on success, ESP_ERR_NO_MEM if the mutex can not be created,
 *         or the error from esp_pm_lock_create()
 */
esp_err_t sdmmc_host_transaction_handler_init(void)
{
    assert(s_request_mutex == NULL);
    s_request_mutex = xSemaphoreCreateMutex();
    if (!s_request_mutex) {
        return ESP_ERR_NO_MEM;
    }
    s_is_app_cmd = false;
#ifdef CONFIG_PM_ENABLE
    esp_err_t err = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "sdmmc", &s_pm_lock);
    if (err != ESP_OK) {
        // roll back the mutex allocation so init can be retried
        vSemaphoreDelete(s_request_mutex);
        s_request_mutex = NULL;
        return err;
    }
#endif
    return ESP_OK;
}
/**
 * Release the resources allocated by sdmmc_host_transaction_handler_init().
 * Must not be called while a transaction is in progress.
 */
void sdmmc_host_transaction_handler_deinit(void)
{
    assert(s_request_mutex);
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_delete(s_pm_lock);
    s_pm_lock = NULL;
#endif
    vSemaphoreDelete(s_request_mutex);
    s_request_mutex = NULL;
}
/**
 * Send one command to the card and wait for the whole transaction
 * (command phase, optional data phase, optional busy wait) to finish.
 *
 * @param slot     host slot index, passed through to sdmmc_host_start_command
 * @param cmdinfo  command to execute; cmdinfo->error receives the
 *                 command/data phase error status
 * @return ESP_OK on success; ESP_ERR_INVALID_SIZE / ESP_ERR_INVALID_ARG for an
 *         unusable data buffer; ESP_ERR_TIMEOUT if the busy wait expires;
 *         otherwise the first error reported while processing events
 */
esp_err_t sdmmc_host_do_transaction(int slot, sdmmc_command_t* cmdinfo)
{
    esp_err_t ret;
    // only one transaction at a time; also guards the static DMA state below
    xSemaphoreTake(s_request_mutex, portMAX_DELAY);
#ifdef CONFIG_PM_ENABLE
    // keep APB frequency stable for the duration of the transfer
    esp_pm_lock_acquire(s_pm_lock);
#endif
    // dispose of any events which happened asynchronously
    handle_idle_state_events();
    // convert cmdinfo to hardware register value
    sdmmc_hw_cmd_t hw_cmd = make_hw_cmd(cmdinfo);
    if (cmdinfo->data) {
        // Length should be either <4 or >=4 and =0 (mod 4).
        if (cmdinfo->datalen >= 4 && cmdinfo->datalen % 4 != 0) {
            ESP_LOGD(TAG, "%s: invalid size: total=%d",
                    __func__, cmdinfo->datalen);
            ret = ESP_ERR_INVALID_SIZE;
            goto out;
        }
        // DMA needs a word-aligned buffer located in DMA-capable memory
        if ((intptr_t) cmdinfo->data % 4 != 0 ||
                !esp_ptr_dma_capable(cmdinfo->data)) {
            ESP_LOGD(TAG, "%s: buffer %p can not be used for DMA", __func__, cmdinfo->data);
            ret = ESP_ERR_INVALID_ARG;
            goto out;
        }
        // this clears "owned by IDMAC" bits
        memset(s_dma_desc, 0, sizeof(s_dma_desc));
        // initialize first descriptor
        s_dma_desc[0].first_descriptor = 1;
        // save transfer info
        s_cur_transfer.ptr = (uint8_t*) cmdinfo->data;
        s_cur_transfer.size_remaining = cmdinfo->datalen;
        s_cur_transfer.next_desc = 0;
        s_cur_transfer.desc_remaining = (cmdinfo->datalen + SDMMC_DMA_MAX_BUF_LEN - 1) / SDMMC_DMA_MAX_BUF_LEN;
        // prepare descriptors
        fill_dma_descriptors(SDMMC_DMA_DESC_CNT);
        // write transfer info into hardware
        sdmmc_host_dma_prepare(&s_dma_desc[0], cmdinfo->blklen, cmdinfo->datalen);
    }
    // write command into hardware, this also sends the command to the card
    ret = sdmmc_host_start_command(slot, hw_cmd, cmdinfo->arg);
    if (ret != ESP_OK) {
        goto out;
    }
    // process events until transfer is complete
    cmdinfo->error = ESP_OK;
    sdmmc_req_state_t state = SDMMC_SENDING_CMD;
    sdmmc_event_t unhandled_events = { 0 };
    while (state != SDMMC_IDLE) {
        ret = handle_event(cmdinfo, &state, &unhandled_events);
        if (ret != ESP_OK) {
            break;
        }
    }
    // optionally wait for the card to release the busy state
    if (ret == ESP_OK && (cmdinfo->flags & SCF_WAIT_BUSY)) {
        if (!wait_for_busy_cleared(cmdinfo->timeout_ms)) {
            ret = ESP_ERR_TIMEOUT;
        }
    }
    // remember whether the next command follows a successful CMD55
    s_is_app_cmd = (ret == ESP_OK && cmdinfo->opcode == MMC_APP_CMD);
out:
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_release(s_pm_lock);
#endif
    xSemaphoreGive(s_request_mutex);
    return ret;
}
  169. static size_t get_free_descriptors_count(void)
  170. {
  171. const size_t next = s_cur_transfer.next_desc;
  172. size_t count = 0;
  173. /* Starting with the current DMA descriptor, count the number of
  174. * descriptors which have 'owned_by_idmac' set to 0. These are the
  175. * descriptors already processed by the DMA engine.
  176. */
  177. for (size_t i = 0; i < SDMMC_DMA_DESC_CNT; ++i) {
  178. sdmmc_desc_t* desc = &s_dma_desc[(next + i) % SDMMC_DMA_DESC_CNT];
  179. if (desc->owned_by_idmac) {
  180. break;
  181. }
  182. ++count;
  183. if (desc->next_desc_ptr == NULL) {
  184. /* final descriptor in the chain */
  185. break;
  186. }
  187. }
  188. return count;
  189. }
/**
 * Queue up to num_desc descriptors of data from s_cur_transfer into the
 * DMA descriptor ring, handing each one to the IDMAC. Returns early once the
 * whole remaining buffer has been queued.
 *
 * @param num_desc  maximum number of descriptors to fill
 */
static void fill_dma_descriptors(size_t num_desc)
{
    for (size_t i = 0; i < num_desc; ++i) {
        if (s_cur_transfer.size_remaining == 0) {
            return;     // whole buffer already queued
        }
        const size_t next = s_cur_transfer.next_desc;
        sdmmc_desc_t* desc = &s_dma_desc[next];
        // descriptor must have been released by the DMA engine already
        assert(!desc->owned_by_idmac);
        size_t size_to_fill =
                (s_cur_transfer.size_remaining < SDMMC_DMA_MAX_BUF_LEN) ?
                s_cur_transfer.size_remaining : SDMMC_DMA_MAX_BUF_LEN;
        bool last = size_to_fill == s_cur_transfer.size_remaining;
        desc->last_descriptor = last;
        desc->second_address_chained = 1;
        // this hands the descriptor over to the IDMAC
        desc->owned_by_idmac = 1;
        desc->buffer1_ptr = s_cur_transfer.ptr;
        desc->next_desc_ptr = (last) ? NULL : &s_dma_desc[(next + 1) % SDMMC_DMA_DESC_CNT];
        assert(size_to_fill < 4 || size_to_fill % 4 == 0);
        // round up to a word boundary for the short (<4 byte) tail case
        desc->buffer1_size = (size_to_fill + 3) & (~3);
        s_cur_transfer.size_remaining -= size_to_fill;
        s_cur_transfer.ptr += size_to_fill;
        s_cur_transfer.next_desc = (s_cur_transfer.next_desc + 1) % SDMMC_DMA_DESC_CNT;
        ESP_LOGV(TAG, "fill %d desc=%d rem=%d next=%d last=%d sz=%d",
                num_desc, next, s_cur_transfer.size_remaining,
                s_cur_transfer.next_desc, desc->last_descriptor, desc->buffer1_size);
    }
}
  218. static esp_err_t handle_idle_state_events(void)
  219. {
  220. /* Handle any events which have happened in between transfers.
  221. * Under current assumptions (no SDIO support) only card detect events
  222. * can happen in the idle state.
  223. */
  224. sdmmc_event_t evt;
  225. while (sdmmc_host_wait_for_event(0, &evt) == ESP_OK) {
  226. if (evt.sdmmc_status & SDMMC_INTMASK_CD) {
  227. ESP_LOGV(TAG, "card detect event");
  228. evt.sdmmc_status &= ~SDMMC_INTMASK_CD;
  229. }
  230. if (evt.sdmmc_status != 0 || evt.dma_status != 0) {
  231. ESP_LOGE(TAG, "handle_idle_state_events unhandled: %08x %08x",
  232. evt.sdmmc_status, evt.dma_status);
  233. }
  234. }
  235. return ESP_OK;
  236. }
  237. static esp_err_t handle_event(sdmmc_command_t* cmd, sdmmc_req_state_t* state,
  238. sdmmc_event_t* unhandled_events)
  239. {
  240. sdmmc_event_t event;
  241. esp_err_t err = sdmmc_host_wait_for_event(cmd->timeout_ms / portTICK_PERIOD_MS, &event);
  242. if (err != ESP_OK) {
  243. ESP_LOGE(TAG, "sdmmc_host_wait_for_event returned 0x%x", err);
  244. if (err == ESP_ERR_TIMEOUT) {
  245. sdmmc_host_dma_stop();
  246. }
  247. return err;
  248. }
  249. ESP_LOGV(TAG, "sdmmc_handle_event: event %08x %08x, unhandled %08x %08x",
  250. event.sdmmc_status, event.dma_status,
  251. unhandled_events->sdmmc_status, unhandled_events->dma_status);
  252. event.sdmmc_status |= unhandled_events->sdmmc_status;
  253. event.dma_status |= unhandled_events->dma_status;
  254. process_events(event, cmd, state, unhandled_events);
  255. ESP_LOGV(TAG, "sdmmc_handle_event: events unhandled: %08x %08x", unhandled_events->sdmmc_status, unhandled_events->dma_status);
  256. return ESP_OK;
  257. }
  258. static bool cmd_needs_auto_stop(const sdmmc_command_t* cmd)
  259. {
  260. /* SDMMC host needs an "auto stop" flag for the following commands: */
  261. return cmd->datalen > 0 &&
  262. (cmd->opcode == MMC_WRITE_BLOCK_MULTIPLE ||
  263. cmd->opcode == MMC_READ_BLOCK_MULTIPLE ||
  264. cmd->opcode == MMC_WRITE_DAT_UNTIL_STOP ||
  265. cmd->opcode == MMC_READ_DAT_UNTIL_STOP);
  266. }
  267. static sdmmc_hw_cmd_t make_hw_cmd(sdmmc_command_t* cmd)
  268. {
  269. sdmmc_hw_cmd_t res = { 0 };
  270. res.cmd_index = cmd->opcode;
  271. if (cmd->opcode == MMC_STOP_TRANSMISSION) {
  272. res.stop_abort_cmd = 1;
  273. } else if (cmd->opcode == MMC_GO_IDLE_STATE) {
  274. res.send_init = 1;
  275. } else {
  276. res.wait_complete = 1;
  277. }
  278. if (cmd->opcode == MMC_GO_IDLE_STATE) {
  279. res.send_init = 1;
  280. }
  281. if (cmd->flags & SCF_RSP_PRESENT) {
  282. res.response_expect = 1;
  283. if (cmd->flags & SCF_RSP_136) {
  284. res.response_long = 1;
  285. }
  286. }
  287. if (cmd->flags & SCF_RSP_CRC) {
  288. res.check_response_crc = 1;
  289. }
  290. res.use_hold_reg = 1;
  291. if (cmd->data) {
  292. res.data_expected = 1;
  293. if ((cmd->flags & SCF_CMD_READ) == 0) {
  294. res.rw = 1;
  295. }
  296. assert(cmd->datalen % cmd->blklen == 0);
  297. res.send_auto_stop = cmd_needs_auto_stop(cmd) ? 1 : 0;
  298. }
  299. ESP_LOGV(TAG, "%s: opcode=%d, rexp=%d, crc=%d, auto_stop=%d", __func__,
  300. res.cmd_index, res.response_expect, res.check_response_crc,
  301. res.send_auto_stop);
  302. return res;
  303. }
/**
 * Copy the card's response from the peripheral registers into cmd->response,
 * and translate command-phase error bits of the interrupt status into an
 * esp_err_t stored in cmd->error.
 *
 * @param status  SDMMC interrupt status captured for this command
 * @param cmd     command whose response/error fields are filled in
 */
static void process_command_response(uint32_t status, sdmmc_command_t* cmd)
{
    if (cmd->flags & SCF_RSP_PRESENT) {
        if (cmd->flags & SCF_RSP_136) {
            /* Destination is 4-byte aligned, can memcopy from peripheral registers */
            memcpy(cmd->response, (uint32_t*) SDMMC.resp, 4 * sizeof(uint32_t));
        } else {
            // short response: only the first word is meaningful
            cmd->response[0] = SDMMC.resp[0];
            cmd->response[1] = 0;
            cmd->response[2] = 0;
            cmd->response[3] = 0;
        }
    }
    esp_err_t err = ESP_OK;
    if (status & SDMMC_INTMASK_RTO) {
        // response timeout is only possible when response is expected
        assert(cmd->flags & SCF_RSP_PRESENT);
        err = ESP_ERR_TIMEOUT;
    } else if ((cmd->flags & SCF_RSP_CRC) && (status & SDMMC_INTMASK_RCRC)) {
        err = ESP_ERR_INVALID_CRC;
    } else if (status & SDMMC_INTMASK_RESP_ERR) {
        err = ESP_ERR_INVALID_RESPONSE;
    }
    if (err != ESP_OK) {
        cmd->error = err;
        if (cmd->data) {
            // the data phase can not complete after a command error
            sdmmc_host_dma_stop();
        }
        ESP_LOGD(TAG, "%s: error 0x%x (status=%08x)", __func__, err, status);
    }
}
/**
 * Translate data-phase error bits of the interrupt status into an esp_err_t
 * stored in cmd->error, and stop the DMA engine if any error occurred.
 *
 * @param status  SDMMC interrupt status captured during the data phase
 * @param cmd     command whose error field is filled in
 */
static void process_data_status(uint32_t status, sdmmc_command_t* cmd)
{
    if (status & SDMMC_DATA_ERR_MASK) {
        if (status & SDMMC_INTMASK_DTO) {
            cmd->error = ESP_ERR_TIMEOUT;
        } else if (status & SDMMC_INTMASK_DCRC) {
            cmd->error = ESP_ERR_INVALID_CRC;
        } else if ((status & SDMMC_INTMASK_EBE) &&
                (cmd->flags & SCF_CMD_READ) == 0) {
            // end-bit error on a write is treated as a timeout
            cmd->error = ESP_ERR_TIMEOUT;
        } else {
            cmd->error = ESP_FAIL;
        }
        // discard whatever is left in the FIFO after a data error
        SDMMC.ctrl.fifo_reset = 1;
    }
    if (cmd->error != 0) {
        if (cmd->data) {
            sdmmc_host_dma_stop();
        }
        ESP_LOGD(TAG, "%s: error 0x%x (status=%08x)", __func__, cmd->error, status);
    }
}
  357. static inline bool mask_check_and_clear(uint32_t* state, uint32_t mask) {
  358. bool ret = ((*state) & mask) != 0;
  359. *state &= ~mask;
  360. return ret;
  361. }
  362. static esp_err_t process_events(sdmmc_event_t evt, sdmmc_command_t* cmd,
  363. sdmmc_req_state_t* pstate, sdmmc_event_t* unhandled_events)
  364. {
  365. const char* const s_state_names[] __attribute__((unused)) = {
  366. "IDLE",
  367. "SENDING_CMD",
  368. "SENDIND_DATA",
  369. "BUSY"
  370. };
  371. sdmmc_event_t orig_evt = evt;
  372. ESP_LOGV(TAG, "%s: state=%s evt=%x dma=%x", __func__, s_state_names[*pstate],
  373. evt.sdmmc_status, evt.dma_status);
  374. sdmmc_req_state_t next_state = *pstate;
  375. sdmmc_req_state_t state = (sdmmc_req_state_t) -1;
  376. while (next_state != state) {
  377. state = next_state;
  378. switch (state) {
  379. case SDMMC_IDLE:
  380. break;
  381. case SDMMC_SENDING_CMD:
  382. if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_CMD_ERR_MASK)) {
  383. process_command_response(orig_evt.sdmmc_status, cmd);
  384. break; // Need to wait for the CMD_DONE interrupt
  385. }
  386. if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_INTMASK_CMD_DONE)) {
  387. process_command_response(orig_evt.sdmmc_status, cmd);
  388. if (cmd->error != ESP_OK) {
  389. next_state = SDMMC_IDLE;
  390. break;
  391. }
  392. if (cmd->data == NULL) {
  393. next_state = SDMMC_IDLE;
  394. } else {
  395. next_state = SDMMC_SENDING_DATA;
  396. }
  397. }
  398. break;
  399. case SDMMC_SENDING_DATA:
  400. if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_DATA_ERR_MASK)) {
  401. process_data_status(orig_evt.sdmmc_status, cmd);
  402. sdmmc_host_dma_stop();
  403. }
  404. if (mask_check_and_clear(&evt.dma_status, SDMMC_DMA_DONE_MASK)) {
  405. s_cur_transfer.desc_remaining--;
  406. if (s_cur_transfer.size_remaining) {
  407. int desc_to_fill = get_free_descriptors_count();
  408. fill_dma_descriptors(desc_to_fill);
  409. sdmmc_host_dma_resume();
  410. }
  411. if (s_cur_transfer.desc_remaining == 0) {
  412. next_state = SDMMC_BUSY;
  413. }
  414. }
  415. if (orig_evt.sdmmc_status & (SDMMC_INTMASK_SBE | SDMMC_INTMASK_DATA_OVER)) {
  416. // On start bit error, DATA_DONE interrupt will not be generated
  417. next_state = SDMMC_IDLE;
  418. break;
  419. }
  420. break;
  421. case SDMMC_BUSY:
  422. if (!mask_check_and_clear(&evt.sdmmc_status, SDMMC_INTMASK_DATA_OVER)) {
  423. break;
  424. }
  425. process_data_status(orig_evt.sdmmc_status, cmd);
  426. next_state = SDMMC_IDLE;
  427. break;
  428. }
  429. ESP_LOGV(TAG, "%s state=%s next_state=%s", __func__, s_state_names[state], s_state_names[next_state]);
  430. }
  431. *pstate = state;
  432. *unhandled_events = evt;
  433. return ESP_OK;
  434. }
  435. static bool wait_for_busy_cleared(int timeout_ms)
  436. {
  437. if (timeout_ms == 0) {
  438. return !sdmmc_host_card_busy();
  439. }
  440. /* It would have been nice to do this without polling, however the peripheral
  441. * can only generate Busy Clear Interrupt for data write commands, and waiting
  442. * for busy clear is mostly needed for other commands such as MMC_SWITCH.
  443. */
  444. int timeout_ticks = (timeout_ms + portTICK_PERIOD_MS - 1) / portTICK_PERIOD_MS;
  445. while (timeout_ticks-- > 0) {
  446. if (!sdmmc_host_card_busy()) {
  447. return true;
  448. }
  449. vTaskDelay(1);
  450. }
  451. return false;
  452. }