/*
 * blkpart.c - block-device partition layer: maps partition-relative
 * sector reads/writes/controls onto a backing flash device.
 */
  1. #define pr_fmt(fmt) "blkpart: " fmt
  2. #include <stdio.h>
  3. #include <stdint.h>
  4. #include <errno.h>
  5. #include <stdlib.h>
  6. #include <stdbool.h>
  7. #include <string.h>
  8. #include <blkpart.h>
  9. #include <rtthread.h>
/* NOTE: MIN evaluates both arguments twice - pass side-effect-free expressions only. */
#define MIN(a, b) ((a) > (b) ? (b) : (a))
/* Round x down to the previous multiple of a (a must be a power of two). */
#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
/* Head of the singly linked list of registered block devices. */
static struct blkpart *blk_head = NULL;
  15. void blkpart_del_list(struct blkpart *blk)
  16. {
  17. struct blkpart *pblk, *pre;
  18. if (!blk_head)
  19. {
  20. return;
  21. }
  22. pblk = pre = blk_head;
  23. for (pblk = blk_head; pblk; pre = pblk, pblk = pblk->next)
  24. {
  25. if (pblk == blk)
  26. {
  27. if (pblk == blk_head)
  28. {
  29. blk_head = NULL;
  30. }
  31. else
  32. {
  33. pre->next = pblk->next;
  34. }
  35. break;
  36. }
  37. }
  38. }
  39. void blkpart_add_list(struct blkpart *blk)
  40. {
  41. struct blkpart *pblk, *pre;
  42. blk->next = NULL;
  43. if (!blk_head)
  44. {
  45. blk_head = blk;
  46. return;
  47. }
  48. pblk = pre = blk_head;
  49. while (pblk)
  50. {
  51. pre = pblk;
  52. pblk = pblk->next;
  53. }
  54. pre->next = blk;
  55. }
  56. void del_blkpart(struct blkpart *blk)
  57. {
  58. int i;
  59. if (!blk)
  60. {
  61. return;
  62. }
  63. for (i = 0; i < blk->n_parts; i++)
  64. {
  65. struct part *part = &blk->parts[i];
  66. if (!part)
  67. {
  68. continue;
  69. }
  70. }
  71. blkpart_del_list(blk);
  72. }
  73. struct part *get_part_by_index(const char *blk_name, uint32_t index)
  74. {
  75. struct blkpart *blk = blk_head;
  76. for (blk = blk_head; blk; blk = blk->next)
  77. {
  78. if (!strcmp(blk_name, blk->name))
  79. {
  80. if (index == 0)
  81. {
  82. return &blk->root;
  83. }
  84. else if (index == PARTINDEX_THE_LAST)
  85. {
  86. return &blk->parts[blk->n_parts - 1];
  87. }
  88. else if (blk->n_parts >= index)
  89. {
  90. return &blk->parts[index - 1];
  91. }
  92. else
  93. {
  94. return NULL;
  95. }
  96. }
  97. }
  98. return NULL;
  99. }
  100. #ifdef CONFIG_BLKPART_SHOW_INFO_CMD
  101. static int part_info_main(int argc, char **argv)
  102. {
  103. int i;
  104. struct blkpart *blk;
  105. struct part *part;
  106. for (blk = blk_head; blk; blk = blk->next)
  107. {
  108. for (i = 0; i < blk->n_parts; i++)
  109. {
  110. part = &blk->parts[i];
  111. printf("%s(%s): bytes 0x%llx off 0x%llx\n", part->name, part->devname,
  112. part->bytes, part->off);
  113. }
  114. }
  115. return 0;
  116. }
  117. FINSH_FUNCTION_EXPORT_CMD(part_info_main, __cmd_part_info, dump nor partitions);
  118. #endif
  119. struct part *get_part_by_name(const char *name)
  120. {
  121. struct blkpart *blk;
  122. if (!strncmp(name, "/dev/", sizeof("/dev/") - 1))
  123. {
  124. name += sizeof("/dev/") - 1;
  125. }
  126. for (blk = blk_head; blk; blk = blk->next)
  127. {
  128. int i;
  129. for (i = 0; i < blk->n_parts; i++)
  130. {
  131. struct part *part = &blk->parts[i];
  132. if (!strcmp(part->name, name))
  133. {
  134. return part;
  135. }
  136. if (!strcmp(part->devname, name))
  137. {
  138. return part;
  139. }
  140. }
  141. }
  142. return NULL;
  143. }
  144. struct blkpart *get_blkpart_by_name(const char *name)
  145. {
  146. struct blkpart *blk;
  147. if (!name)
  148. {
  149. return blk_head;
  150. }
  151. for (blk = blk_head; blk; blk = blk->next)
  152. {
  153. if (!strcmp(blk->name, name))
  154. {
  155. return blk;
  156. }
  157. }
  158. return NULL;
  159. }
  160. rt_size_t part_read(rt_device_t dev, rt_off_t offset, void *data, rt_size_t size)
  161. {
  162. if (size == 0)
  163. {
  164. return 0;
  165. }
  166. ssize_t ret, sz = 0;
  167. struct part *part = (struct part *)dev->user_data;
  168. struct blkpart *blk = part->blk;
  169. rt_device_t spinor_dev = blk->dev;
  170. size *= blk->blk_bytes; /* sector to size */
  171. offset *= blk->blk_bytes;
  172. char *page_buf = NULL;
  173. if (offset >= part->bytes)
  174. {
  175. printf("read offset %lu over part size %lu\n", offset, part->bytes);
  176. return 0;
  177. }
  178. if (offset + size > part->bytes)
  179. {
  180. printf("read %s(%s) over limit: offset %lu + size %lu over %lu\n",
  181. part->name, part->devname, offset, size, part->bytes);
  182. }
  183. size = MIN(part->bytes - offset, size);
  184. pr_debug("read %s(%s) off 0x%x size %lu\n", part->name, part->devname,
  185. offset, size);
  186. offset += part->off;
  187. if (offset % blk->page_bytes || size % blk->page_bytes)
  188. {
  189. page_buf = malloc(blk->page_bytes);
  190. if (!page_buf)
  191. {
  192. return -ENOMEM;
  193. }
  194. memset(page_buf, 0, blk->page_bytes);
  195. }
  196. /**
  197. * Step 1:
  198. * read the beginning data that not align to block size
  199. */
  200. if (offset % blk->page_bytes)
  201. {
  202. uint32_t addr, poff, len;
  203. addr = ALIGN_DOWN(offset, blk->page_bytes);
  204. poff = offset - addr;
  205. len = MIN(blk->page_bytes - poff, size);
  206. pr_debug("offset %lu not align %u, fix them before align read\n",
  207. offset, blk->blk_bytes);
  208. pr_debug("step1: read page data from addr 0x%x\n", addr);
  209. ret = spinor_dev->read(spinor_dev, addr / blk->page_bytes, page_buf, blk->page_bytes / blk->page_bytes);
  210. ret *= blk->page_bytes;
  211. if (ret != blk->blk_bytes)
  212. {
  213. goto err;
  214. }
  215. pr_debug("step2: copy page data to buf with page offset 0x%x and len %u\n",
  216. poff, len);
  217. memcpy(data, page_buf + poff, len);
  218. offset += len;
  219. data += len;
  220. sz += len;
  221. size -= len;
  222. }
  223. /**
  224. * Step 2:
  225. * read data that align to block size
  226. */
  227. while (size >= blk->page_bytes)
  228. {
  229. uint32_t len = (size/blk->page_bytes)*blk->page_bytes;
  230. ret = spinor_dev->read(spinor_dev, offset / blk->blk_bytes, (char *)data, len / blk->blk_bytes);
  231. ret *= blk->page_bytes;
  232. if (ret != len)
  233. {
  234. goto err;
  235. }
  236. offset += len;
  237. data += len;
  238. sz += len;
  239. size -= len;
  240. }
  241. /**
  242. * Step 3:
  243. * read the last data that not align to block size
  244. */
  245. if (size)
  246. {
  247. pr_debug("last size %u not align %u, read them\n", size, blk->blk_bytes);
  248. pr_debug("step1: read page data from addr 0x%x\n", offset);
  249. ret = spinor_dev->read(spinor_dev, offset / blk->blk_bytes, page_buf, blk->page_bytes / blk->page_bytes);
  250. ret *= blk->page_bytes;
  251. if (ret != blk->page_bytes)
  252. {
  253. goto err;
  254. }
  255. pr_debug("step2: copy page data to buf with page with len %u\n", size);
  256. memcpy(data, page_buf, size);
  257. sz += size;
  258. }
  259. #ifdef DEBUG
  260. pr_debug("read data:\n");
  261. hexdump(data, sz);
  262. #endif
  263. ret = 0;
  264. goto out;
  265. err:
  266. pr_err("read failed - %d\n", (int)ret);
  267. out:
  268. if (page_buf)
  269. {
  270. free(page_buf);
  271. }
  272. return ret ? ret / blk->blk_bytes: sz / blk->blk_bytes;
  273. }
  274. int do_write_without_erase(rt_device_t dev, struct blkpart *blk, uint32_t addr, uint32_t size, char *buf)
  275. {
  276. return dev->write(dev, addr, buf, size);
  277. }
/*
 * Erase a region of the backing device, then program @buf into it.
 *
 * Callers in this file pass @addr and @size in device sector units
 * (addr / blk->blk_bytes, size / blk->blk_bytes); the erase control
 * command receives those same values - presumably the driver expects
 * sector units there too, TODO confirm against the spinor driver.
 * Returns @size on success, -EIO on erase or write failure.
 */
static int do_erase_write_blk(rt_device_t dev, struct blkpart *blk, uint32_t addr, uint32_t size, char *buf)
{
#if 0
    /* The code is prepared for elmfat which mounted at spinor */
    int ret;
    uint8_t *read_buf;
    unsigned int align_addr = ALIGN_DOWN(addr, blk->blk_bytes);
    read_buf = malloc(blk->blk_bytes);
    if (!read_buf)
    {
        return -ENOMEM;
    }
    memset(read_buf, 0, blk->blk_bytes);
    ret = dev->read(dev, align_addr, read_buf, blk->blk_bytes);
    if (ret != blk->blk_bytes)
    {
        free(read_buf);
        return -EIO;
    }
    if (!(align_addr % blk->blk_bytes))
    {
        blk_dev_erase_t erase_sector;
        memset(&erase_sector, 0, sizeof(blk_dev_erase_t));
        erase_sector.addr = align_addr;
        erase_sector.len = blk->blk_bytes;
        ret = dev->control(dev, BLOCK_DEVICE_CMD_ERASE_SECTOR, &erase_sector);
        if (ret)
        {
            free(read_buf);
            return ret;
        }
    }
    memcpy(read_buf + (addr - align_addr), buf, blk->page_bytes);
    ret = dev->write(dev, align_addr, read_buf, blk->blk_bytes);
    free(read_buf);
    if (ret == blk->blk_bytes)
    {
        return blk->page_bytes;
    }
    else
    {
        return -EIO;
    }
#else
    int ret = -1;
    blk_dev_erase_t erase_sector;
    /* erase the whole target range first ... */
    memset(&erase_sector, 0, sizeof(blk_dev_erase_t));
    erase_sector.addr = addr;
    erase_sector.len = size;
    ret = dev->control(dev, BLOCK_DEVICE_CMD_ERASE_SECTOR, &erase_sector);
    if (ret)
    {
        return -EIO;
    }
    /* ... then program the new contents */
    ret = dev->write(dev, addr, buf, size);
    if (ret == size)
    {
        return size;
    }
    else
    {
        return -EIO;
    }
#endif
}
  343. rt_size_t _part_write(rt_device_t dev, rt_off_t offset, const void *data, rt_size_t size, int erase_before_write)
  344. {
  345. ssize_t ret, sz = 0;
  346. struct part *part = (struct part *)dev->user_data;
  347. struct blkpart *blk = part->blk;
  348. rt_device_t spinor_dev = blk->dev;
  349. char *blk_buf = NULL;
  350. int (*pwrite)(rt_device_t dev, struct blkpart * blk, uint32_t addr, uint32_t size, char *buf);
  351. if (size == 0)
  352. {
  353. return 0;
  354. }
  355. size *= blk->blk_bytes; /* sector to size */
  356. offset *= blk->blk_bytes;
  357. if (offset >= part->bytes)
  358. {
  359. printf("write offset %lu over part size %lu\n", offset, part->bytes);
  360. return 0;
  361. }
  362. if (offset + size > part->bytes)
  363. {
  364. printf("write %s(%s) over limit: offset %lu + size %lu over %lu\n",
  365. part->name, part->devname, offset, size, part->bytes);
  366. }
  367. size = MIN(part->bytes - offset, size);
  368. pr_debug("write %s(%s) off 0x%x size %lu (erase %d)\n", part->name,
  369. part->devname, offset, size, erase_before_write);
  370. offset += part->off;
  371. if (offset % blk->blk_bytes || size % blk->blk_bytes)
  372. {
  373. blk_buf = malloc(blk->blk_bytes);
  374. if (!blk_buf)
  375. {
  376. return -ENOMEM;
  377. }
  378. memset(blk_buf, 0, blk->blk_bytes);
  379. }
  380. if (erase_before_write)
  381. {
  382. pwrite = do_erase_write_blk;
  383. }
  384. else
  385. {
  386. pwrite = do_write_without_erase;
  387. }
  388. /**
  389. * Step 1:
  390. * write the beginning data that not align to block size
  391. */
  392. if (offset % blk->blk_bytes)
  393. {
  394. uint32_t addr, poff, len;
  395. addr = ALIGN_DOWN(offset, blk->blk_bytes);
  396. poff = offset - addr;
  397. len = MIN(blk->blk_bytes - poff, size);
  398. pr_debug("offset %u not align %u, fix them before align write\n",
  399. offset, blk->blk_bytes);
  400. pr_debug("step1: read page data from addr 0x%x\n", addr);
  401. ret = spinor_dev->read(spinor_dev, addr / blk->blk_bytes, blk_buf, blk->blk_bytes / blk->blk_bytes);
  402. ret *= blk->blk_bytes;
  403. if (ret != blk->blk_bytes)
  404. {
  405. goto err;
  406. }
  407. /* addr must less or equal to address */
  408. pr_debug("step2: copy buf data to page data with page offset 0x%x and len %u\n",
  409. poff, len);
  410. memcpy(blk_buf + poff, data, len);
  411. pr_debug("step3: flush the fixed page data\n");
  412. ret = pwrite(spinor_dev, blk, addr / blk->blk_bytes, blk->blk_bytes / blk->blk_bytes, blk_buf);
  413. ret *= blk->blk_bytes;
  414. if (ret != blk->blk_bytes)
  415. {
  416. goto err;
  417. }
  418. offset += len;
  419. data += len;
  420. sz += len;
  421. size -= len;
  422. }
  423. while (size >= blk->blk_bytes)
  424. {
  425. uint32_t len = (size/blk->blk_bytes)*blk->blk_bytes;
  426. ret = pwrite(spinor_dev, blk, offset / blk->blk_bytes, len / blk->blk_bytes, (char *)data);
  427. ret *= blk->blk_bytes;
  428. if (ret != len)
  429. {
  430. goto err;
  431. }
  432. offset += len;
  433. data += len;
  434. sz += len;
  435. size -= len;
  436. }
  437. if (size)
  438. {
  439. pr_debug("last size %u not align %u, write them\n", size, blk->blk_bytes);
  440. pr_debug("step1: read page data from addr 0x%x\n", offset);
  441. memset(blk_buf, 0x00, sizeof(blk->blk_bytes));
  442. ret = spinor_dev->read(spinor_dev, offset / blk->blk_bytes, blk_buf, blk->blk_bytes);
  443. if (ret != blk->blk_bytes)
  444. {
  445. goto err;
  446. }
  447. pr_debug("step2: copy buf to page data with page with len %u\n", size);
  448. memcpy(blk_buf, data, size);
  449. pr_debug("step3: flush the fixed page data\n");
  450. ret = pwrite(spinor_dev, blk, offset / blk->blk_bytes, blk->blk_bytes / blk->blk_bytes, blk_buf);
  451. ret *= blk->blk_bytes;
  452. if (ret != blk->blk_bytes)
  453. {
  454. goto err;
  455. }
  456. sz += size;
  457. }
  458. #ifdef DEBUG
  459. pr_debug("write data:\n");
  460. hexdump(data, sz);
  461. #endif
  462. ret = 0;
  463. goto out;
  464. err:
  465. pr_err("write failed - %d\n", (int)ret);
  466. out:
  467. if (blk_buf)
  468. {
  469. free(blk_buf);
  470. }
  471. return ret ? ret / blk->blk_bytes: sz / blk->blk_bytes;
  472. }
  473. rt_size_t part_erase_before_write(rt_device_t dev, rt_off_t offset, const void *data, rt_size_t size)
  474. {
  475. return _part_write(dev, offset, data, size, 1);
  476. }
  477. rt_size_t part_erase_without_write(rt_device_t dev, rt_off_t offset, const void *data, rt_size_t size)
  478. {
  479. return _part_write(dev, offset, data, size, 0);
  480. }
  481. rt_err_t part_control(rt_device_t dev, int cmd, void *args)
  482. {
  483. rt_err_t ret = -1;
  484. struct part *part = (struct part *)dev->user_data;
  485. struct blkpart *blk = part->blk;
  486. rt_device_t spinor_dev = blk->dev;
  487. struct rt_device_blk_geometry *geometry = NULL;
  488. blk_dev_erase_t *erase_sector = (blk_dev_erase_t *)args;
  489. switch (cmd)
  490. {
  491. case DEVICE_PART_CMD_ERASE_SECTOR:
  492. erase_sector = (blk_dev_erase_t *)(args);
  493. if (erase_sector->addr + erase_sector->len > part->bytes)
  494. {
  495. printf("erase %s(%s) over limit: offset %u + size %u over %lu\n",
  496. part->name, part->devname, erase_sector->addr, erase_sector->len, part->bytes);
  497. }
  498. erase_sector->len = MIN(part->bytes - erase_sector->addr, erase_sector->len);
  499. erase_sector->addr = erase_sector->addr + part->off;
  500. if (spinor_dev && spinor_dev->control)
  501. {
  502. ret = spinor_dev->control(spinor_dev, BLOCK_DEVICE_CMD_ERASE_SECTOR, erase_sector);
  503. }
  504. break;
  505. case DEVICE_PART_CMD_GET_BLOCK_SIZE:
  506. if (spinor_dev && spinor_dev->control)
  507. {
  508. ret = spinor_dev->control(spinor_dev, BLOCK_DEVICE_CMD_GET_BLOCK_SIZE, args);
  509. }
  510. else
  511. {
  512. ret = -1;
  513. }
  514. break;
  515. case DEVICE_PART_CMD_GET_TOTAL_SIZE:
  516. *(unsigned int *)args = part->bytes;
  517. ret = 0;
  518. break;
  519. case RT_DEVICE_CTRL_BLK_GETGEOME:
  520. geometry = (struct rt_device_blk_geometry *)args;
  521. memset(geometry, 0, sizeof(struct rt_device_blk_geometry));
  522. if (spinor_dev && spinor_dev->control)
  523. {
  524. ret = spinor_dev->control(spinor_dev, RT_DEVICE_CTRL_BLK_GETGEOME, args);
  525. if (!ret)
  526. {
  527. geometry->sector_count = part->bytes / geometry->bytes_per_sector;
  528. ret = 0;
  529. }
  530. }
  531. break;
  532. case RT_DEVICE_CTRL_BLK_ERASE:
  533. ret = 0;
  534. break;
  535. default:
  536. break;
  537. }
  538. return ret;
  539. }