/* blkpart.c - block-device partition layer (RT-Thread) */
  1. #define pr_fmt(fmt) "blkpart: " fmt
  2. #include <stdio.h>
  3. #include <stdint.h>
  4. #include <errno.h>
  5. #include <stdlib.h>
  6. #include <stdbool.h>
  7. #include <string.h>
  8. #include <blkpart.h>
  9. #include <rtthread.h>
  10. #include <rtdevice.h>
  11. #include <drv_sdmmc.h>
  12. #define MIN(a, b) ((a) > (b) ? (b) : (a))
  13. #define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
  14. #define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
  15. #define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
  16. static struct blkpart *blk_head = NULL;
  17. void blkpart_del_list(struct blkpart *blk)
  18. {
  19. struct blkpart *pblk, *pre;
  20. if (!blk_head)
  21. {
  22. return;
  23. }
  24. pblk = pre = blk_head;
  25. for (pblk = blk_head; pblk; pre = pblk, pblk = pblk->next)
  26. {
  27. if (pblk == blk)
  28. {
  29. if (pblk == blk_head)
  30. {
  31. blk_head = NULL;
  32. }
  33. else
  34. {
  35. pre->next = pblk->next;
  36. }
  37. break;
  38. }
  39. }
  40. }
  41. void blkpart_add_list(struct blkpart *blk)
  42. {
  43. struct blkpart *pblk, *pre;
  44. blk->next = NULL;
  45. if (!blk_head)
  46. {
  47. blk_head = blk;
  48. return;
  49. }
  50. pblk = pre = blk_head;
  51. while (pblk)
  52. {
  53. pre = pblk;
  54. pblk = pblk->next;
  55. }
  56. pre->next = blk;
  57. }
  58. void del_blkpart(struct blkpart *blk)
  59. {
  60. int i;
  61. if (!blk)
  62. {
  63. return;
  64. }
  65. for (i = 0; i < blk->n_parts; i++)
  66. {
  67. struct part *part = &blk->parts[i];
  68. if (!part)
  69. {
  70. continue;
  71. }
  72. }
  73. blkpart_del_list(blk);
  74. }
  75. struct part *get_part_by_index(const char *blk_name, uint32_t index)
  76. {
  77. struct blkpart *blk = blk_head;
  78. for (blk = blk_head; blk; blk = blk->next)
  79. {
  80. if (!strcmp(blk_name, blk->name))
  81. {
  82. if (index == 0)
  83. {
  84. return &blk->root;
  85. }
  86. else if (index == PARTINDEX_THE_LAST)
  87. {
  88. return &blk->parts[blk->n_parts - 1];
  89. }
  90. else if (blk->n_parts >= index)
  91. {
  92. return &blk->parts[index - 1];
  93. }
  94. else
  95. {
  96. return NULL;
  97. }
  98. }
  99. }
  100. return NULL;
  101. }
  102. #ifdef CONFIG_BLKPART_SHOW_INFO_CMD
  103. static int part_info_main(int argc, char **argv)
  104. {
  105. int i;
  106. struct blkpart *blk;
  107. struct part *part;
  108. for (blk = blk_head; blk; blk = blk->next)
  109. {
  110. for (i = 0; i < blk->n_parts; i++)
  111. {
  112. part = &blk->parts[i];
  113. printf("%s(%s): bytes 0x%llx off 0x%llx\n", part->name, part->devname,
  114. part->bytes, part->off);
  115. }
  116. }
  117. return 0;
  118. }
  119. FINSH_FUNCTION_EXPORT_CMD(part_info_main, __cmd_part_info, dump nor partitions);
  120. #endif
  121. struct part *get_part_by_name(const char *name)
  122. {
  123. struct blkpart *blk;
  124. if (!strncmp(name, "/dev/", sizeof("/dev/") - 1))
  125. {
  126. name += sizeof("/dev/") - 1;
  127. }
  128. for (blk = blk_head; blk; blk = blk->next)
  129. {
  130. int i;
  131. for (i = 0; i < blk->n_parts; i++)
  132. {
  133. struct part *part = &blk->parts[i];
  134. if (!strcmp(part->name, name))
  135. {
  136. return part;
  137. }
  138. if (!strcmp(part->devname, name))
  139. {
  140. return part;
  141. }
  142. }
  143. }
  144. return NULL;
  145. }
  146. struct blkpart *get_blkpart_by_name(const char *name)
  147. {
  148. struct blkpart *blk;
  149. if (!name)
  150. {
  151. return blk_head;
  152. }
  153. for (blk = blk_head; blk; blk = blk->next)
  154. {
  155. if (!strcmp(blk->name, name))
  156. {
  157. return blk;
  158. }
  159. }
  160. return NULL;
  161. }
/*
 * part_read() - RT-Thread block-device read hook for one partition.
 *
 * @dev:    partition device; dev->user_data is the struct part.
 * @offset: start sector, relative to the partition.
 * @size:   number of sectors to read.
 *
 * Returns the number of sectors read (sz / blk_bytes) on success, or a
 * negative error (scaled by blk_bytes) on failure.
 *
 * Works in three steps: an unaligned head copied through a bounce
 * buffer, an aligned middle read straight into @data, and an unaligned
 * tail copied through the bounce buffer again.
 *
 * NOTE(review): this function mixes blk->page_bytes and blk->blk_bytes
 * as the alignment unit (see the marked lines below); whether they are
 * always equal depends on the device setup -- confirm before touching
 * the arithmetic.
 */
rt_size_t part_read(rt_device_t dev, rt_off_t offset, void *data, rt_size_t size)
{
    if (size == 0)
    {
        return 0;
    }
    ssize_t ret, sz = 0;
    struct part *part = (struct part *)dev->user_data;
    struct blkpart *blk = part->blk;
    rt_device_t spinor_dev = blk->dev;
    size *= blk->blk_bytes; /* sector to size */
    offset *= blk->blk_bytes;
    char *page_buf = NULL;
    /* whole request outside the partition: nothing to read */
    if (offset >= part->bytes)
    {
        printf("read offset %lu over part size %lu\n", offset, part->bytes);
        return 0;
    }
    /* request crosses the partition end: warn, then clamp below */
    if (offset + size > part->bytes)
    {
        printf("read %s(%s) over limit: offset %lu + size %lu over %lu\n",
               part->name, part->devname, offset, size, part->bytes);
    }
    size = MIN(part->bytes - offset, size);
    pr_debug("read %s(%s) off 0x%x size %lu\n", part->name, part->devname,
             offset, size);
    /* from here on, offset is absolute on the backing device */
    offset += part->off;
    /* bounce buffer only needed when head or tail is unaligned */
    if (offset % blk->page_bytes || size % blk->page_bytes)
    {
        page_buf = malloc(blk->page_bytes);
        if (!page_buf)
        {
            return -ENOMEM;
        }
        memset(page_buf, 0, blk->page_bytes);
    }
    /**
     * Step 1:
     * read the beginning data that not align to block size
     */
    if (offset % blk->page_bytes)
    {
        uint32_t addr, poff, len;
        addr = ALIGN_DOWN(offset, blk->page_bytes); /* page containing offset */
        poff = offset - addr;                       /* byte offset inside it */
        len = MIN(blk->page_bytes - poff, size);    /* usable bytes of that page */
        pr_debug("offset %lu not align %u, fix them before align read\n",
                 offset, blk->blk_bytes);
        pr_debug("step1: read page data from addr 0x%x\n", addr);
        /* rt_dev_read takes (sector, buf, sector_count); last arg is 1 */
        ret = rt_dev_read(spinor_dev, addr / blk->page_bytes, page_buf, blk->page_bytes / blk->page_bytes);
        ret *= blk->page_bytes;
        /* NOTE(review): ret holds page_bytes on success but is compared
         * against blk_bytes -- only safe if they are equal; verify. */
        if (ret != blk->blk_bytes)
        {
            goto err;
        }
        pr_debug("step2: copy page data to buf with page offset 0x%x and len %u\n",
                 poff, len);
        memcpy(data, page_buf + poff, len);
        offset += len;
        data += len; /* NOTE(review): void* arithmetic is a GCC extension */
        sz += len;
        size -= len;
    }
    /**
     * Step 2:
     * read data that align to block size
     */
    while (size >= blk->page_bytes)
    {
        /* largest page-aligned chunk still pending */
        uint32_t len = (size / blk->page_bytes) * blk->page_bytes;
        /* NOTE(review): sector index scaled by blk_bytes but ret scaled
         * by page_bytes -- consistent only if the two units match. */
        ret = rt_dev_read(spinor_dev, offset / blk->blk_bytes, (char *)data, len / blk->blk_bytes);
        ret *= blk->page_bytes;
        if (ret != len)
        {
            goto err;
        }
        offset += len;
        data += len;
        sz += len;
        size -= len;
    }
    /**
     * Step 3:
     * read the last data that not align to block size
     */
    if (size)
    {
        pr_debug("last size %u not align %u, read them\n", size, blk->blk_bytes);
        pr_debug("step1: read page data from addr 0x%x\n", offset);
        /* tail: read one full page into the bounce buffer, copy prefix */
        ret = rt_dev_read(spinor_dev, offset / blk->blk_bytes, page_buf, blk->page_bytes / blk->page_bytes);
        ret *= blk->page_bytes;
        if (ret != blk->page_bytes)
        {
            goto err;
        }
        pr_debug("step2: copy page data to buf with page with len %u\n", size);
        memcpy(data, page_buf, size);
        sz += size;
    }
#ifdef DEBUG
    pr_debug("read data:\n");
    hexdump(data, sz);
#endif
    ret = 0;
    goto out;
err:
    pr_err("read failed - %d\n", (int)ret);
out:
    if (page_buf)
    {
        free(page_buf);
    }
    /* convert bytes back to sectors for the RT-Thread block layer */
    return ret ? ret / blk->blk_bytes : sz / blk->blk_bytes;
}
/*
 * Plain-write variant of the pwrite() callback used by _part_write():
 * forwards straight to rt_dev_write().  @blk is unused; it exists only
 * to match do_erase_write_blk()'s signature.
 */
int do_write_without_erase(rt_device_t dev, struct blkpart *blk, uint32_t addr, uint32_t size, char *buf)
{
    return rt_dev_write(dev, addr, buf, size);
}
/*
 * Erase-then-write variant of the pwrite() callback used by
 * _part_write(): issues BLOCK_DEVICE_CMD_ERASE_SECTOR for the target
 * range, then writes @buf.
 *
 * Returns @size on success, -EIO on erase or write failure.
 *
 * The #if 0 branch is a kept-for-reference read-modify-write
 * implementation "prepared for elmfat which mounted at spinor".
 */
static int do_erase_write_blk(rt_device_t dev, struct blkpart *blk, uint32_t addr, uint32_t size, char *buf)
{
#if 0
    /* The code is prepared for elmfat which mounted at spinor */
    int ret;
    uint8_t *read_buf;
    unsigned int align_addr = ALIGN_DOWN(addr, blk->blk_bytes);
    read_buf = malloc(blk->blk_bytes);
    if (!read_buf)
    {
        return -ENOMEM;
    }
    memset(read_buf, 0, blk->blk_bytes);
    ret = dev->read(dev, align_addr, read_buf, blk->blk_bytes);
    if (ret != blk->blk_bytes)
    {
        free(read_buf);
        return -EIO;
    }
    if (!(align_addr % blk->blk_bytes))
    {
        blk_dev_erase_t erase_sector;
        memset(&erase_sector, 0, sizeof(blk_dev_erase_t));
        erase_sector.addr = align_addr;
        erase_sector.len = blk->blk_bytes;
        ret = rt_dev_control(dev, BLOCK_DEVICE_CMD_ERASE_SECTOR, &erase_sector);
        if (ret)
        {
            free(read_buf);
            return ret;
        }
    }
    memcpy(read_buf + (addr - align_addr), buf, blk->page_bytes);
    ret = rt_dev_write(dev, align_addr, read_buf, blk->blk_bytes);
    free(read_buf);
    if (ret == blk->blk_bytes)
    {
        return blk->page_bytes;
    }
    else
    {
        return -EIO;
    }
#else
    int ret = -1;
    blk_dev_erase_t erase_sector;
    /* erase exactly the range we are about to rewrite */
    memset(&erase_sector, 0, sizeof(blk_dev_erase_t));
    erase_sector.addr = addr;
    erase_sector.len = size;
    ret = rt_dev_control(dev, BLOCK_DEVICE_CMD_ERASE_SECTOR, &erase_sector);
    if (ret)
    {
        return -EIO;
    }
    ret = rt_dev_write(dev, addr, buf, size);
    /* rt_dev_write reports amount written; anything short is an error */
    if (ret == size)
    {
        return size;
    }
    else
    {
        return -EIO;
    }
#endif
}
  345. rt_size_t _part_write(rt_device_t dev, rt_off_t offset, const void *data, rt_size_t size, int erase_before_write)
  346. {
  347. ssize_t ret, sz = 0;
  348. struct part *part = (struct part *)dev->user_data;
  349. struct blkpart *blk = part->blk;
  350. rt_device_t spinor_dev = blk->dev;
  351. char *blk_buf = NULL;
  352. int (*pwrite)(rt_device_t dev, struct blkpart * blk, uint32_t addr, uint32_t size, char *buf);
  353. if (size == 0)
  354. {
  355. return 0;
  356. }
  357. size *= blk->blk_bytes; /* sector to size */
  358. offset *= blk->blk_bytes;
  359. if (offset >= part->bytes)
  360. {
  361. printf("write offset %lu over part size %lu\n", offset, part->bytes);
  362. return 0;
  363. }
  364. if (offset + size > part->bytes)
  365. {
  366. printf("write %s(%s) over limit: offset %lu + size %lu over %lu\n",
  367. part->name, part->devname, offset, size, part->bytes);
  368. }
  369. size = MIN(part->bytes - offset, size);
  370. pr_debug("write %s(%s) off 0x%x size %lu (erase %d)\n", part->name,
  371. part->devname, offset, size, erase_before_write);
  372. offset += part->off;
  373. if (offset % blk->blk_bytes || size % blk->blk_bytes)
  374. {
  375. blk_buf = malloc(blk->blk_bytes);
  376. if (!blk_buf)
  377. {
  378. return -ENOMEM;
  379. }
  380. memset(blk_buf, 0, blk->blk_bytes);
  381. }
  382. if (erase_before_write)
  383. {
  384. pwrite = do_erase_write_blk;
  385. }
  386. else
  387. {
  388. pwrite = do_write_without_erase;
  389. }
  390. /**
  391. * Step 1:
  392. * write the beginning data that not align to block size
  393. */
  394. if (offset % blk->blk_bytes)
  395. {
  396. uint32_t addr, poff, len;
  397. addr = ALIGN_DOWN(offset, blk->blk_bytes);
  398. poff = offset - addr;
  399. len = MIN(blk->blk_bytes - poff, size);
  400. pr_debug("offset %u not align %u, fix them before align write\n",
  401. offset, blk->blk_bytes);
  402. pr_debug("step1: read page data from addr 0x%x\n", addr);
  403. ret = rt_dev_read(spinor_dev, addr / blk->blk_bytes, blk_buf, blk->blk_bytes / blk->blk_bytes);
  404. ret *= blk->blk_bytes;
  405. if (ret != blk->blk_bytes)
  406. {
  407. goto err;
  408. }
  409. /* addr must less or equal to address */
  410. pr_debug("step2: copy buf data to page data with page offset 0x%x and len %u\n",
  411. poff, len);
  412. memcpy(blk_buf + poff, data, len);
  413. pr_debug("step3: flush the fixed page data\n");
  414. ret = pwrite(spinor_dev, blk, addr / blk->blk_bytes, blk->blk_bytes / blk->blk_bytes, blk_buf);
  415. ret *= blk->blk_bytes;
  416. if (ret != blk->blk_bytes)
  417. {
  418. goto err;
  419. }
  420. offset += len;
  421. data += len;
  422. sz += len;
  423. size -= len;
  424. }
  425. while (size >= blk->blk_bytes)
  426. {
  427. uint32_t len = (size/blk->blk_bytes)*blk->blk_bytes;
  428. ret = pwrite(spinor_dev, blk, offset / blk->blk_bytes, len / blk->blk_bytes, (char *)data);
  429. ret *= blk->blk_bytes;
  430. if (ret != len)
  431. {
  432. goto err;
  433. }
  434. offset += len;
  435. data += len;
  436. sz += len;
  437. size -= len;
  438. }
  439. if (size)
  440. {
  441. pr_debug("last size %u not align %u, write them\n", size, blk->blk_bytes);
  442. pr_debug("step1: read page data from addr 0x%x\n", offset);
  443. memset(blk_buf, 0x00, sizeof(blk->blk_bytes));
  444. ret = rt_dev_read(spinor_dev, offset / blk->blk_bytes, blk_buf, blk->blk_bytes);
  445. if (ret != blk->blk_bytes)
  446. {
  447. goto err;
  448. }
  449. pr_debug("step2: copy buf to page data with page with len %u\n", size);
  450. memcpy(blk_buf, data, size);
  451. pr_debug("step3: flush the fixed page data\n");
  452. ret = pwrite(spinor_dev, blk, offset / blk->blk_bytes, blk->blk_bytes / blk->blk_bytes, blk_buf);
  453. ret *= blk->blk_bytes;
  454. if (ret != blk->blk_bytes)
  455. {
  456. goto err;
  457. }
  458. sz += size;
  459. }
  460. #ifdef DEBUG
  461. pr_debug("write data:\n");
  462. hexdump(data, sz);
  463. #endif
  464. ret = 0;
  465. goto out;
  466. err:
  467. pr_err("write failed - %d\n", (int)ret);
  468. out:
  469. if (blk_buf)
  470. {
  471. free(blk_buf);
  472. }
  473. return ret ? ret / blk->blk_bytes: sz / blk->blk_bytes;
  474. }
  475. rt_size_t part_erase_before_write(rt_device_t dev, rt_off_t offset, const void *data, rt_size_t size)
  476. {
  477. return _part_write(dev, offset, data, size, 1);
  478. }
  479. rt_size_t part_erase_without_write(rt_device_t dev, rt_off_t offset, const void *data, rt_size_t size)
  480. {
  481. return _part_write(dev, offset, data, size, 0);
  482. }
/*
 * part_control() - RT-Thread control/ioctl hook for one partition.
 *
 * Translates partition-relative commands into absolute operations on
 * the backing device (blk->dev).  Returns 0 / device result on
 * success, -1 for unsupported commands or a missing backing device.
 */
rt_err_t part_control(rt_device_t dev, int cmd, void *args)
{
    rt_err_t ret = -1;
    struct part *part = (struct part *)dev->user_data;
    struct blkpart *blk = part->blk;
    rt_device_t spinor_dev = blk->dev;
    struct rt_device_blk_geometry *geometry = NULL;
    /* NOTE(review): this cast also runs for commands whose args is not
     * a blk_dev_erase_t; harmless because it is re-cast per case. */
    blk_dev_erase_t *erase_sector = (blk_dev_erase_t *)args;
    switch (cmd)
    {
        case DEVICE_PART_CMD_ERASE_SECTOR:
            erase_sector = (blk_dev_erase_t *)(args);
            /* erase crossing the partition end: warn, then clamp */
            if (erase_sector->addr + erase_sector->len > part->bytes)
            {
                printf("erase %s(%s) over limit: offset %u + size %u over %lu\n",
                       part->name, part->devname, erase_sector->addr, erase_sector->len, part->bytes);
            }
            erase_sector->len = MIN(part->bytes - erase_sector->addr, erase_sector->len);
            /* rebase partition-relative address to the device */
            erase_sector->addr = erase_sector->addr + part->off;
            if (spinor_dev && rt_dev_has_control(spinor_dev))
            {
                ret = rt_dev_control(spinor_dev, BLOCK_DEVICE_CMD_ERASE_SECTOR, erase_sector);
            }
            break;
        case DEVICE_PART_CMD_GET_BLOCK_SIZE:
            /* block size comes straight from the backing device */
            if (spinor_dev && rt_dev_has_control(spinor_dev))
            {
                ret = rt_dev_control(spinor_dev, BLOCK_DEVICE_CMD_GET_BLOCK_SIZE, args);
            }
            else
            {
                ret = -1;
            }
            break;
        case DEVICE_PART_CMD_GET_TOTAL_SIZE:
            /* total size is the partition's, not the whole device's */
            *(unsigned int *)args = part->bytes;
            ret = 0;
            break;
        case RT_DEVICE_CTRL_BLK_GETGEOME:
            geometry = (struct rt_device_blk_geometry *)args;
            memset(geometry, 0, sizeof(struct rt_device_blk_geometry));
            if (spinor_dev && rt_dev_has_control(spinor_dev))
            {
                ret = rt_dev_control(spinor_dev, RT_DEVICE_CTRL_BLK_GETGEOME, args);
                if (!ret)
                {
                    /* device geometry, but sector count scoped to this
                     * partition; assumes bytes_per_sector != 0 -- the
                     * backing driver is trusted to fill it in */
                    geometry->sector_count = part->bytes / geometry->bytes_per_sector;
                    ret = 0;
                }
            }
            break;
        case RT_DEVICE_CTRL_BLK_ERASE:
            /* accepted as a no-op; erase happens per-write elsewhere */
            ret = 0;
            break;
        default:
            break;
    }
    return ret;
}