/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <inttypes.h>
#include "esp_timer.h"
#include "sdmmc_common.h"

static const char* TAG = "sdmmc_cmd";

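/* Send a single command to the card: apply the host's command timeout
 * (or the default if none is set), run the transaction through the host
 * driver, and log the response. Returns the transaction error, if any,
 * otherwise the error recorded in the command structure.
 */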
esp_err_t sdmmc_send_cmd(sdmmc_card_t* card, sdmmc_command_t* cmd)
{
    if (card->host.command_timeout_ms != 0) {
        cmd->timeout_ms = card->host.command_timeout_ms;
    } else if (cmd->timeout_ms == 0) {
        cmd->timeout_ms = SDMMC_DEFAULT_CMD_TIMEOUT_MS;
    }

    int slot = card->host.slot;
    ESP_LOGV(TAG, "sending cmd slot=%d op=%" PRIu32 " arg=%" PRIx32 " flags=%x data=%p blklen=%" PRIu32 " datalen=%" PRIu32 " timeout=%" PRIu32,
             slot, cmd->opcode, cmd->arg, cmd->flags, cmd->data, (uint32_t) cmd->blklen, (uint32_t) cmd->datalen, cmd->timeout_ms);
    esp_err_t err = (*card->host.do_transaction)(slot, cmd);
    if (err != 0) {
        ESP_LOGD(TAG, "cmd=%" PRIu32 ", sdmmc_req_run returned 0x%x", cmd->opcode, err);
        return err;
    }
    int state = MMC_R1_CURRENT_STATE(cmd->response);
    ESP_LOGV(TAG, "cmd response %08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32 " err=0x%x state=%d",
             cmd->response[0],
             cmd->response[1],
             cmd->response[2],
             cmd->response[3],
             cmd->error,
             state);
    return cmd->error;
}

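/* Send an application-specific command: CMD55 (APP_CMD) with the card's RCA
 * first, then the requested command. In SD mode, the APP_CMD status bit in
 * the R1 response is checked before proceeding.
 */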
esp_err_t sdmmc_send_app_cmd(sdmmc_card_t* card, sdmmc_command_t* cmd)
{
    sdmmc_command_t app_cmd = {
        .opcode = MMC_APP_CMD,
        .flags = SCF_CMD_AC | SCF_RSP_R1,
        .arg = MMC_ARG_RCA(card->rca),
    };
    esp_err_t err = sdmmc_send_cmd(card, &app_cmd);
    if (err != ESP_OK) {
        return err;
    }
    // Check APP_CMD status bit (only in SD mode)
    if (!host_is_spi(card) && !(MMC_R1(app_cmd.response) & MMC_R1_APP_CMD)) {
        ESP_LOGW(TAG, "card doesn't support APP_CMD");
        return ESP_ERR_NOT_SUPPORTED;
    }
    return sdmmc_send_cmd(card, cmd);
}

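/* Reset the card with CMD0 (GO_IDLE_STATE). In SPI mode the command is sent
 * twice, since some cards only enter SPI mode on the second CMD0.
 */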
esp_err_t sdmmc_send_cmd_go_idle_state(sdmmc_card_t* card)
{
    sdmmc_command_t cmd = {
        .opcode = MMC_GO_IDLE_STATE,
        .flags = SCF_CMD_BC | SCF_RSP_R0,
    };
    esp_err_t err = sdmmc_send_cmd(card, &cmd);
    if (host_is_spi(card)) {
        /* To enter SPI mode, CMD0 needs to be sent twice (see figure 4-1 in
         * SD Simplified spec v4.10). Some cards enter SD mode on first CMD0,
         * so don't expect the above command to succeed.
         * SCF_RSP_R1 flag below tells the lower layer to expect correct R1
         * response (in SPI mode).
         */
        (void) err;
        vTaskDelay(SDMMC_GO_IDLE_DELAY_MS / portTICK_PERIOD_MS);
        cmd.flags |= SCF_RSP_R1;
        err = sdmmc_send_cmd(card, &cmd);
    }
    if (err == ESP_OK) {
        vTaskDelay(SDMMC_GO_IDLE_DELAY_MS / portTICK_PERIOD_MS);
    }
    return err;
}

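/* Send CMD8 (SEND_IF_COND) with the host's voltage range and a check pattern;
 * the card is expected to echo the pattern back in the R7 response.
 */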
esp_err_t sdmmc_send_cmd_send_if_cond(sdmmc_card_t* card, uint32_t ocr)
{
    const uint8_t pattern = 0xaa; /* any pattern will do here */
    sdmmc_command_t cmd = {
        .opcode = SD_SEND_IF_COND,
        .arg = (((ocr & SD_OCR_VOL_MASK) != 0) << 8) | pattern,
        .flags = SCF_CMD_BCR | SCF_RSP_R7,
    };
    esp_err_t err = sdmmc_send_cmd(card, &cmd);
    if (err != ESP_OK) {
        return err;
    }
    uint8_t response = cmd.response[0] & 0xff;
    if (response != pattern) {
        ESP_LOGD(TAG, "%s: received=0x%x expected=0x%x", __func__, response, pattern);
        return ESP_ERR_INVALID_RESPONSE;
    }
    return ESP_OK;
}

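/* Poll ACMD41 (SD) or CMD1 (MMC) until the card signals that initialization
 * is complete, optionally returning the final OCR value via 'ocrp'.
 */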
esp_err_t sdmmc_send_cmd_send_op_cond(sdmmc_card_t* card, uint32_t ocr, uint32_t *ocrp)
{
    esp_err_t err;

    /* If the host supports this, keep card clock enabled
     * from the start of ACMD41 until the card is idle.
     * (Ref. SD spec, section 4.4 "Clock control".)
     */
    if (card->host.set_cclk_always_on != NULL) {
        err = card->host.set_cclk_always_on(card->host.slot, true);
        if (err != ESP_OK) {
            ESP_LOGE(TAG, "%s: set_cclk_always_on (1) err=0x%x", __func__, err);
            return err;
        }
        ESP_LOGV(TAG, "%s: keeping clock on during ACMD41", __func__);
    }

    sdmmc_command_t cmd = {
        .arg = ocr,
        .flags = SCF_CMD_BCR | SCF_RSP_R3,
        .opcode = SD_APP_OP_COND
    };
    int nretries = SDMMC_SEND_OP_COND_MAX_RETRIES;
    int err_cnt = SDMMC_SEND_OP_COND_MAX_ERRORS;
    for (; nretries != 0; --nretries) {
        bzero(&cmd, sizeof cmd);
        cmd.arg = ocr;
        cmd.flags = SCF_CMD_BCR | SCF_RSP_R3;
        if (!card->is_mmc) { /* SD mode */
            cmd.opcode = SD_APP_OP_COND;
            err = sdmmc_send_app_cmd(card, &cmd);
        } else { /* MMC mode */
            cmd.arg &= ~MMC_OCR_ACCESS_MODE_MASK;
            cmd.arg |= MMC_OCR_SECTOR_MODE;
            cmd.opcode = MMC_SEND_OP_COND;
            err = sdmmc_send_cmd(card, &cmd);
        }
        if (err != ESP_OK) {
            if (--err_cnt == 0) {
                ESP_LOGD(TAG, "%s: sdmmc_send_app_cmd err=0x%x", __func__, err);
                goto done;
            } else {
                ESP_LOGV(TAG, "%s: ignoring err=0x%x", __func__, err);
                continue;
            }
        }
        // In SD protocol, card sets MEM_READY bit in OCR when it is ready.
        // In SPI protocol, card clears IDLE_STATE bit in R1 response.
        if (!host_is_spi(card)) {
            if ((MMC_R3(cmd.response) & MMC_OCR_MEM_READY) ||
                ocr == 0) {
                break;
            }
        } else {
            if ((SD_SPI_R1(cmd.response) & SD_SPI_R1_IDLE_STATE) == 0) {
                break;
            }
        }
        vTaskDelay(10 / portTICK_PERIOD_MS);
    }
    if (nretries == 0) {
        err = ESP_ERR_TIMEOUT;
        goto done;
    }
    if (ocrp) {
        *ocrp = MMC_R3(cmd.response);
    }
    err = ESP_OK;
done:
    if (card->host.set_cclk_always_on != NULL) {
        esp_err_t err_cclk_dis = card->host.set_cclk_always_on(card->host.slot, false);
        if (err_cclk_dis != ESP_OK) {
            ESP_LOGE(TAG, "%s: set_cclk_always_on (2) err=0x%x", __func__, err_cclk_dis);
            /* If disabling the clock failed, don't overwrite 'err'; return the original error */
        }
        ESP_LOGV(TAG, "%s: clock always-on mode disabled", __func__);
    }
    return err;
}

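/* Read the card's OCR register with CMD58 (SPI mode response format). */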
esp_err_t sdmmc_send_cmd_read_ocr(sdmmc_card_t *card, uint32_t *ocrp)
{
    assert(ocrp);
    sdmmc_command_t cmd = {
        .opcode = SD_READ_OCR,
        .flags = SCF_CMD_BCR | SCF_RSP_R2
    };
    esp_err_t err = sdmmc_send_cmd(card, &cmd);
    if (err != ESP_OK) {
        return err;
    }
    *ocrp = SD_SPI_R3(cmd.response);
    return ESP_OK;
}

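/* Send CMD2 (ALL_SEND_CID) and return the raw CID from the R2 response. */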
esp_err_t sdmmc_send_cmd_all_send_cid(sdmmc_card_t* card, sdmmc_response_t* out_raw_cid)
{
    assert(out_raw_cid);
    sdmmc_command_t cmd = {
        .opcode = MMC_ALL_SEND_CID,
        .flags = SCF_CMD_BCR | SCF_RSP_R2
    };
    esp_err_t err = sdmmc_send_cmd(card, &cmd);
    if (err != ESP_OK) {
        return err;
    }
    memcpy(out_raw_cid, &cmd.response, sizeof(sdmmc_response_t));
    return ESP_OK;
}

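/* Read and decode the CID with CMD10, which acts as a data-read command in
 * SPI mode.
 */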
esp_err_t sdmmc_send_cmd_send_cid(sdmmc_card_t *card, sdmmc_cid_t *out_cid)
{
    assert(out_cid);
    assert(host_is_spi(card) && "SEND_CID should only be used in SPI mode");
    assert(!card->is_mmc && "MMC cards are not supported in SPI mode");
    sdmmc_response_t buf;
    sdmmc_command_t cmd = {
        .opcode = MMC_SEND_CID,
        .flags = SCF_CMD_READ | SCF_CMD_ADTC,
        .arg = 0,
        .data = &buf[0],
        .datalen = sizeof(buf)
    };
    esp_err_t err = sdmmc_send_cmd(card, &cmd);
    if (err != ESP_OK) {
        return err;
    }
    sdmmc_flip_byte_order(buf, sizeof(buf));
    return sdmmc_decode_cid(buf, out_cid);
}

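/* Send CMD3: SD cards publish their RCA in the R6 response, while for MMC
 * cards the host assigns RCA=1.
 */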
esp_err_t sdmmc_send_cmd_set_relative_addr(sdmmc_card_t* card, uint16_t* out_rca)
{
    assert(out_rca);
    sdmmc_command_t cmd = {
        .opcode = SD_SEND_RELATIVE_ADDR,
        .flags = SCF_CMD_BCR | SCF_RSP_R6
    };
    /* MMC cards expect us to set the RCA.
     * Set RCA to 1 since we don't support multiple cards on the same bus, for now.
     */
    uint16_t mmc_rca = 1;
    if (card->is_mmc) {
        cmd.arg = MMC_ARG_RCA(mmc_rca);
    }
    esp_err_t err = sdmmc_send_cmd(card, &cmd);
    if (err != ESP_OK) {
        return err;
    }
    *out_rca = (card->is_mmc) ? mmc_rca : SD_R6_RCA(cmd.response);
    return ESP_OK;
}

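/* Send CMD16 (SET_BLOCKLEN) with the sector size from the CSD. */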
esp_err_t sdmmc_send_cmd_set_blocklen(sdmmc_card_t* card, sdmmc_csd_t* csd)
{
    sdmmc_command_t cmd = {
        .opcode = MMC_SET_BLOCKLEN,
        .arg = csd->sector_size,
        .flags = SCF_CMD_AC | SCF_RSP_R1
    };
    return sdmmc_send_cmd(card, &cmd);
}

esp_err_t sdmmc_send_cmd_send_csd(sdmmc_card_t* card, sdmmc_csd_t* out_csd)
{
    /* The trick with SEND_CSD is that in SPI mode, it acts as a data read
     * command, while in SD mode it is an AC command with R2 response.
     */
    sdmmc_response_t spi_buf;
    const bool is_spi = host_is_spi(card);
    sdmmc_command_t cmd = {
        .opcode = MMC_SEND_CSD,
        .arg = is_spi ? 0 : MMC_ARG_RCA(card->rca),
        .flags = is_spi ? (SCF_CMD_READ | SCF_CMD_ADTC | SCF_RSP_R1) :
                          (SCF_CMD_AC | SCF_RSP_R2),
        .data = is_spi ? &spi_buf[0] : 0,
        .datalen = is_spi ? sizeof(spi_buf) : 0,
    };
    esp_err_t err = sdmmc_send_cmd(card, &cmd);
    if (err != ESP_OK) {
        return err;
    }
    uint32_t* ptr = cmd.response;
    if (is_spi) {
        sdmmc_flip_byte_order(spi_buf, sizeof(spi_buf));
        ptr = spi_buf;
    }
    if (card->is_mmc) {
        err = sdmmc_mmc_decode_csd(cmd.response, out_csd);
    } else {
        err = sdmmc_decode_csd(ptr, out_csd);
    }
    return err;
}

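/* Send CMD7 to select the card with the given RCA, or de-select it when
 * rca == 0.
 */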
esp_err_t sdmmc_send_cmd_select_card(sdmmc_card_t* card, uint32_t rca)
{
    /* Don't expect to see a response when de-selecting a card */
    uint32_t response = (rca == 0) ? 0 : SCF_RSP_R1;
    sdmmc_command_t cmd = {
        .opcode = MMC_SELECT_CARD,
        .arg = MMC_ARG_RCA(rca),
        .flags = SCF_CMD_AC | response
    };
    return sdmmc_send_cmd(card, &cmd);
}

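/* Read the SCR register with ACMD51. The 8-byte SCR arrives as a data
 * transfer, so a DMA-capable buffer is allocated for it.
 */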
esp_err_t sdmmc_send_cmd_send_scr(sdmmc_card_t* card, sdmmc_scr_t *out_scr)
{
    size_t datalen = 8;
    esp_err_t err = ESP_FAIL;
    uint32_t *buf = NULL;
    size_t actual_size = 0;
    err = esp_dma_malloc(datalen, 0, (void *)&buf, &actual_size);
    if (err != ESP_OK) {
        return err;
    }
    sdmmc_command_t cmd = {
        .data = buf,
        .datalen = datalen,
        .buflen = actual_size,
        .blklen = datalen,
        .flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1,
        .opcode = SD_APP_SEND_SCR
    };
    err = sdmmc_send_app_cmd(card, &cmd);
    if (err == ESP_OK) {
        err = sdmmc_decode_scr(buf, out_scr);
    }
    free(buf);
    return err;
}

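/* Send ACMD6 to switch the SD bus between 1-bit and 4-bit width. */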
esp_err_t sdmmc_send_cmd_set_bus_width(sdmmc_card_t* card, int width)
{
    sdmmc_command_t cmd = {
        .opcode = SD_APP_SET_BUS_WIDTH,
        .flags = SCF_RSP_R1 | SCF_CMD_AC,
        .arg = (width == 4) ? SD_ARG_BUS_WIDTH_4 : SD_ARG_BUS_WIDTH_1,
    };
    return sdmmc_send_app_cmd(card, &cmd);
}

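/* Send CMD59 (CRC_ON_OFF) to enable or disable CRC checking in SPI mode. */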
esp_err_t sdmmc_send_cmd_crc_on_off(sdmmc_card_t* card, bool crc_enable)
{
    assert(host_is_spi(card) && "CRC_ON_OFF can only be used in SPI mode");
    sdmmc_command_t cmd = {
        .opcode = SD_CRC_ON_OFF,
        .arg = crc_enable ? 1 : 0,
        .flags = SCF_CMD_AC | SCF_RSP_R1
    };
    return sdmmc_send_cmd(card, &cmd);
}

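/* Send CMD13 (SEND_STATUS). The status is taken from the R2 response in SPI
 * mode and from the R1 card status in SD mode.
 */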
esp_err_t sdmmc_send_cmd_send_status(sdmmc_card_t* card, uint32_t* out_status)
{
    sdmmc_command_t cmd = {
        .opcode = MMC_SEND_STATUS,
        .arg = MMC_ARG_RCA(card->rca),
        .flags = SCF_CMD_AC | SCF_RSP_R1
    };
    esp_err_t err = sdmmc_send_cmd(card, &cmd);
    if (err != ESP_OK) {
        return err;
    }
    if (out_status) {
        if (host_is_spi(card)) {
            *out_status = SD_SPI_R2(cmd.response);
        } else {
            *out_status = MMC_R1(cmd.response);
        }
    }
    return ESP_OK;
}

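/* Write 'block_count' sectors starting at 'start_block' from 'src'. If 'src'
 * is DMA-capable, the whole range is written in one transfer; otherwise each
 * block is staged through a temporary DMA-capable buffer.
 *
 * Minimal usage sketch (assumes 'card' has already been initialized by the
 * host/card setup code and that the card uses 512-byte sectors):
 *
 *     uint8_t sector[512] = {0};
 *     esp_err_t err = sdmmc_write_sectors(card, sector, 0, 1);
 *     if (err != ESP_OK) {
 *         ESP_LOGE(TAG, "write failed: 0x%x", err);
 *     }
 */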
esp_err_t sdmmc_write_sectors(sdmmc_card_t* card, const void* src,
                              size_t start_block, size_t block_count)
{
    if (block_count == 0) {
        return ESP_OK;
    }
    esp_err_t err = ESP_OK;
    size_t block_size = card->csd.sector_size;
    if (esp_dma_is_buffer_aligned(src, block_size * block_count, ESP_DMA_BUF_LOCATION_INTERNAL)) {
        err = sdmmc_write_sectors_dma(card, src, start_block, block_count, block_size * block_count);
    } else {
        // SDMMC peripheral needs DMA-capable buffers. Split the write into
        // separate single block writes, if needed, and allocate a temporary
        // DMA-capable buffer.
        void *tmp_buf = NULL;
        size_t actual_size = 0;
        err = esp_dma_malloc(block_size, 0, &tmp_buf, &actual_size);
        if (err != ESP_OK) {
            return err;
        }
        const uint8_t* cur_src = (const uint8_t*) src;
        for (size_t i = 0; i < block_count; ++i) {
            memcpy(tmp_buf, cur_src, block_size);
            cur_src += block_size;
            err = sdmmc_write_sectors_dma(card, tmp_buf, start_block + i, 1, actual_size);
            if (err != ESP_OK) {
                ESP_LOGD(TAG, "%s: error 0x%x writing block %d+%d",
                         __func__, err, start_block, i);
                break;
            }
        }
        free(tmp_buf);
    }
    return err;
}

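/* Issue CMD24/CMD25 to write directly from a DMA-capable buffer, then wait
 * until the card reports READY_FOR_DATA (SD mode) or check the R2 status for
 * errors (SPI mode).
 */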
esp_err_t sdmmc_write_sectors_dma(sdmmc_card_t* card, const void* src,
                                  size_t start_block, size_t block_count, size_t buffer_len)
{
    if (start_block + block_count > card->csd.capacity) {
        return ESP_ERR_INVALID_SIZE;
    }
    size_t block_size = card->csd.sector_size;
    sdmmc_command_t cmd = {
        .flags = SCF_CMD_ADTC | SCF_RSP_R1,
        .blklen = block_size,
        .data = (void*) src,
        .datalen = block_count * block_size,
        .buflen = buffer_len,
        .timeout_ms = SDMMC_WRITE_CMD_TIMEOUT_MS
    };
    if (block_count == 1) {
        cmd.opcode = MMC_WRITE_BLOCK_SINGLE;
    } else {
        cmd.opcode = MMC_WRITE_BLOCK_MULTIPLE;
    }
    if (card->ocr & SD_OCR_SDHC_CAP) {
        cmd.arg = start_block;
    } else {
        cmd.arg = start_block * block_size;
    }
    esp_err_t err = sdmmc_send_cmd(card, &cmd);
    if (err != ESP_OK) {
        ESP_LOGE(TAG, "%s: sdmmc_send_cmd returned 0x%x", __func__, err);
        return err;
    }
    uint32_t status = 0;
    size_t count = 0;
    int64_t yield_delay_us = 100 * 1000; // initially 100ms
    int64_t t0 = esp_timer_get_time();
    int64_t t1 = 0;
    /* SD mode: wait for the card to become idle based on R1 status */
    while (!host_is_spi(card) && !(status & MMC_R1_READY_FOR_DATA)) {
        t1 = esp_timer_get_time();
        if (t1 - t0 > SDMMC_READY_FOR_DATA_TIMEOUT_US) {
            ESP_LOGE(TAG, "write sectors dma - timeout");
            return ESP_ERR_TIMEOUT;
        }
        if (t1 - t0 > yield_delay_us) {
            yield_delay_us *= 2;
            vTaskDelay(1);
        }
        err = sdmmc_send_cmd_send_status(card, &status);
        if (err != ESP_OK) {
            ESP_LOGE(TAG, "%s: sdmmc_send_cmd_send_status returned 0x%x", __func__, err);
            return err;
        }
        if (++count % 16 == 0) {
            ESP_LOGV(TAG, "waiting for card to become ready (%" PRIu32 ")", (uint32_t) count);
        }
    }
    /* SPI mode: although card busy indication is based on the busy token,
     * SD spec recommends that the host checks the results of programming by sending
     * SEND_STATUS command. Some of the conditions reported in SEND_STATUS are not
     * reported via a data error token.
     */
    if (host_is_spi(card)) {
        err = sdmmc_send_cmd_send_status(card, &status);
        if (err != ESP_OK) {
            ESP_LOGE(TAG, "%s: sdmmc_send_cmd_send_status returned 0x%x", __func__, err);
            return err;
        }
        if (status & SD_SPI_R2_CARD_LOCKED) {
            ESP_LOGE(TAG, "%s: write failed, card is locked: r2=0x%04" PRIx32,
                     __func__, status);
            return ESP_ERR_INVALID_STATE;
        }
        if (status != 0) {
            ESP_LOGE(TAG, "%s: card status indicates an error after write operation: r2=0x%04" PRIx32,
                     __func__, status);
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    return ESP_OK;
}

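/* Read 'block_count' sectors starting at 'start_block' into 'dst'. If 'dst'
 * is DMA-capable, the whole range is read in one transfer; otherwise each
 * block is read into a temporary DMA-capable buffer and copied out.
 */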
esp_err_t sdmmc_read_sectors(sdmmc_card_t* card, void* dst,
                             size_t start_block, size_t block_count)
{
    if (block_count == 0) {
        return ESP_OK;
    }
    esp_err_t err = ESP_OK;
    size_t block_size = card->csd.sector_size;
    if (esp_dma_is_buffer_aligned(dst, block_size * block_count, ESP_DMA_BUF_LOCATION_INTERNAL)) {
        err = sdmmc_read_sectors_dma(card, dst, start_block, block_count, block_size * block_count);
    } else {
        // SDMMC peripheral needs DMA-capable buffers. Split the read into
        // separate single block reads, if needed, and allocate a temporary
        // DMA-capable buffer.
        void *tmp_buf = NULL;
        size_t actual_size = 0;
        err = esp_dma_malloc(block_size, 0, &tmp_buf, &actual_size);
        if (err != ESP_OK) {
            return err;
        }
        uint8_t* cur_dst = (uint8_t*) dst;
        for (size_t i = 0; i < block_count; ++i) {
            err = sdmmc_read_sectors_dma(card, tmp_buf, start_block + i, 1, actual_size);
            if (err != ESP_OK) {
                ESP_LOGD(TAG, "%s: error 0x%x reading block %d+%d",
                         __func__, err, start_block, i);
                break;
            }
            memcpy(cur_dst, tmp_buf, block_size);
            cur_dst += block_size;
        }
        free(tmp_buf);
    }
    return err;
}

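/* Issue CMD17/CMD18 to read directly into a DMA-capable buffer, then wait
 * until the card reports READY_FOR_DATA (SD mode).
 */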
esp_err_t sdmmc_read_sectors_dma(sdmmc_card_t* card, void* dst,
                                 size_t start_block, size_t block_count, size_t buffer_len)
{
    if (start_block + block_count > card->csd.capacity) {
        return ESP_ERR_INVALID_SIZE;
    }
    size_t block_size = card->csd.sector_size;
    sdmmc_command_t cmd = {
        .flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1,
        .blklen = block_size,
        .data = (void*) dst,
        .datalen = block_count * block_size,
        .buflen = buffer_len,
    };
    if (block_count == 1) {
        cmd.opcode = MMC_READ_BLOCK_SINGLE;
    } else {
        cmd.opcode = MMC_READ_BLOCK_MULTIPLE;
    }
    if (card->ocr & SD_OCR_SDHC_CAP) {
        cmd.arg = start_block;
    } else {
        cmd.arg = start_block * block_size;
    }
    esp_err_t err = sdmmc_send_cmd(card, &cmd);
    if (err != ESP_OK) {
        ESP_LOGE(TAG, "%s: sdmmc_send_cmd returned 0x%x", __func__, err);
        return err;
    }
    uint32_t status = 0;
    size_t count = 0;
    int64_t yield_delay_us = 100 * 1000; // initially 100ms
    int64_t t0 = esp_timer_get_time();
    int64_t t1 = 0;
    /* SD mode: wait for the card to become idle based on R1 status */
    while (!host_is_spi(card) && !(status & MMC_R1_READY_FOR_DATA)) {
        t1 = esp_timer_get_time();
        if (t1 - t0 > SDMMC_READY_FOR_DATA_TIMEOUT_US) {
            ESP_LOGE(TAG, "read sectors dma - timeout");
            return ESP_ERR_TIMEOUT;
        }
        if (t1 - t0 > yield_delay_us) {
            yield_delay_us *= 2;
            vTaskDelay(1);
        }
        err = sdmmc_send_cmd_send_status(card, &status);
        if (err != ESP_OK) {
            ESP_LOGE(TAG, "%s: sdmmc_send_cmd_send_status returned 0x%x", __func__, err);
            return err;
        }
        if (++count % 16 == 0) {
            ESP_LOGV(TAG, "waiting for card to become ready (%" PRIu32 ")", (uint32_t) count);
        }
    }
    return ESP_OK;
}

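/* Erase, trim or discard a range of sectors. The range is set with
 * ERASE_GROUP_START/END, then CMD38 is issued with an argument derived from
 * 'arg' and validated against the card's capabilities.
 *
 * Minimal usage sketch (assumes 'card' has already been initialized):
 *
 *     // erase the first 16 sectors
 *     esp_err_t err = sdmmc_erase_sectors(card, 0, 16, SDMMC_ERASE_ARG);
 */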
esp_err_t sdmmc_erase_sectors(sdmmc_card_t* card, size_t start_sector,
                              size_t sector_count, sdmmc_erase_arg_t arg)
{
    if (sector_count == 0) {
        return ESP_OK;
    }
    if (start_sector + sector_count > card->csd.capacity) {
        return ESP_ERR_INVALID_SIZE;
    }
    uint32_t cmd38_arg;
    if (arg == SDMMC_ERASE_ARG) {
        cmd38_arg = card->is_mmc ? SDMMC_MMC_TRIM_ARG : SDMMC_SD_ERASE_ARG;
    } else {
        cmd38_arg = card->is_mmc ? SDMMC_MMC_DISCARD_ARG : SDMMC_SD_DISCARD_ARG;
    }
    /* validate the CMD38 argument against card supported features */
    if (card->is_mmc) {
        if ((cmd38_arg == SDMMC_MMC_TRIM_ARG) && (sdmmc_can_trim(card) != ESP_OK)) {
            return ESP_ERR_NOT_SUPPORTED;
        }
        if ((cmd38_arg == SDMMC_MMC_DISCARD_ARG) && (sdmmc_can_discard(card) != ESP_OK)) {
            return ESP_ERR_NOT_SUPPORTED;
        }
    } else { // SD card
        if ((cmd38_arg == SDMMC_SD_DISCARD_ARG) && (sdmmc_can_discard(card) != ESP_OK)) {
            return ESP_ERR_NOT_SUPPORTED;
        }
    }
    /* default to block unit addressing */
    size_t addr_unit_mult = 1;
    if (!(card->ocr & SD_OCR_SDHC_CAP)) {
        addr_unit_mult = card->csd.sector_size;
    }
    /* prepare command to set the start address */
    sdmmc_command_t cmd = {
        .flags = SCF_CMD_AC | SCF_RSP_R1 | SCF_WAIT_BUSY,
        .opcode = card->is_mmc ? MMC_ERASE_GROUP_START :
                                 SD_ERASE_GROUP_START,
        .arg = (start_sector * addr_unit_mult),
    };
    esp_err_t err = sdmmc_send_cmd(card, &cmd);
    if (err != ESP_OK) {
        ESP_LOGE(TAG, "%s: sdmmc_send_cmd (ERASE_GROUP_START) returned 0x%x", __func__, err);
        return err;
    }
    /* prepare command to set the end address */
    cmd.opcode = card->is_mmc ? MMC_ERASE_GROUP_END : SD_ERASE_GROUP_END;
    cmd.arg = ((start_sector + (sector_count - 1)) * addr_unit_mult);
    err = sdmmc_send_cmd(card, &cmd);
    if (err != ESP_OK) {
        ESP_LOGE(TAG, "%s: sdmmc_send_cmd (ERASE_GROUP_END) returned 0x%x", __func__, err);
        return err;
    }
    /* issue erase command */
    memset((void *)&cmd, 0, sizeof(sdmmc_command_t));
    cmd.flags = SCF_CMD_AC | SCF_RSP_R1B | SCF_WAIT_BUSY;
    cmd.opcode = MMC_ERASE;
    cmd.arg = cmd38_arg;
    cmd.timeout_ms = sdmmc_get_erase_timeout_ms(card, cmd38_arg, sector_count * card->csd.sector_size / 1024);
    err = sdmmc_send_cmd(card, &cmd);
    if (err != ESP_OK) {
        ESP_LOGE(TAG, "%s: sdmmc_send_cmd (ERASE) returned 0x%x", __func__, err);
        return err;
    }
    if (host_is_spi(card)) {
        uint32_t status;
        err = sdmmc_send_cmd_send_status(card, &status);
        if (err != ESP_OK) {
            ESP_LOGE(TAG, "%s: sdmmc_send_cmd_send_status returned 0x%x", __func__, err);
            return err;
        }
        if (status != 0) {
            ESP_LOGE(TAG, "%s: card status indicates an error after erase operation: r2=0x%04" PRIx32,
                     __func__, status);
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    return ESP_OK;
}

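/* Capability helpers: return ESP_OK if the card supports discard, trim or
 * sanitize, based on EXT_CSD fields (MMC) or the SSR discard bit (SD).
 */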
esp_err_t sdmmc_can_discard(sdmmc_card_t* card)
{
    if ((card->is_mmc) && (card->ext_csd.rev >= EXT_CSD_REV_1_6)) {
        return ESP_OK;
    }
    // SD card
    if ((!card->is_mmc) && !host_is_spi(card) && (card->ssr.discard_support == 1)) {
        return ESP_OK;
    }
    return ESP_FAIL;
}

esp_err_t sdmmc_can_trim(sdmmc_card_t* card)
{
    if ((card->is_mmc) && (card->ext_csd.sec_feature & EXT_CSD_SEC_GB_CL_EN)) {
        return ESP_OK;
    }
    return ESP_FAIL;
}

esp_err_t sdmmc_mmc_can_sanitize(sdmmc_card_t* card)
{
    if ((card->is_mmc) && (card->ext_csd.sec_feature & EXT_CSD_SEC_SANITIZE)) {
        return ESP_OK;
    }
    return ESP_FAIL;
}

esp_err_t sdmmc_mmc_sanitize(sdmmc_card_t* card, uint32_t timeout_ms)
{
    esp_err_t err;
    uint8_t index = EXT_CSD_SANITIZE_START;
    uint8_t set = EXT_CSD_CMD_SET_NORMAL;
    uint8_t value = 0x01;
    if (sdmmc_mmc_can_sanitize(card) != ESP_OK) {
        return ESP_ERR_NOT_SUPPORTED;
    }
    /*
     * A Sanitize operation is initiated by writing a value to the extended
     * CSD[165] SANITIZE_START. While the device is performing the sanitize
     * operation, the busy line is asserted.
     * SWITCH command is used to write the EXT_CSD register.
     */
    sdmmc_command_t cmd = {
        .opcode = MMC_SWITCH,
        .arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | (index << 16) | (value << 8) | set,
        .flags = SCF_RSP_R1B | SCF_CMD_AC | SCF_WAIT_BUSY,
        .timeout_ms = timeout_ms,
    };
    err = sdmmc_send_cmd(card, &cmd);
    if (err == ESP_OK) {
        // check response bit to see that switch was accepted
        if (MMC_R1(cmd.response) & MMC_R1_SWITCH_ERROR) {
            err = ESP_ERR_INVALID_RESPONSE;
        }
    }
    return err;
}

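/* Erase the entire card. MMC cards use discard followed by sanitize when
 * sanitize is supported, otherwise trim; SD cards use a plain erase.
 */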
esp_err_t sdmmc_full_erase(sdmmc_card_t* card)
{
    sdmmc_erase_arg_t arg = SDMMC_SD_ERASE_ARG; // erase by default for SD card
    esp_err_t err;
    if (card->is_mmc) {
        arg = sdmmc_mmc_can_sanitize(card) == ESP_OK ? SDMMC_MMC_DISCARD_ARG : SDMMC_MMC_TRIM_ARG;
    }
    err = sdmmc_erase_sectors(card, 0, card->csd.capacity, arg);
    if ((err == ESP_OK) && (arg == SDMMC_MMC_DISCARD_ARG)) {
        uint32_t timeout_ms = sdmmc_get_erase_timeout_ms(card, SDMMC_MMC_DISCARD_ARG, card->csd.capacity * ((uint64_t) card->csd.sector_size) / 1024);
        return sdmmc_mmc_sanitize(card, timeout_ms);
    }
    return err;
}

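/* Query the card status with CMD13 and discard the value; acts as a simple
 * liveness check for the card.
 */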
esp_err_t sdmmc_get_status(sdmmc_card_t* card)
{
    uint32_t stat;
    return sdmmc_send_cmd_send_status(card, &stat);
}