mtd-cfi.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2023-02-25 GuEe-GUI the first version
  9. */
  10. #include <rtthread.h>
  11. #include <rtdevice.h>
  12. #include <drivers/byteorder.h>
  13. #define DBG_TAG "mtd.cfi"
  14. #define DBG_LVL DBG_INFO
  15. #include <rtdbg.h>
  16. #include "mtd-cfi.h"
/* Per-chip state for one CFI-probed parallel NOR flash. */
struct cfi_flash_device
{
    struct rt_mtd_nor_device parent;        /* MTD NOR interface registered with RT-Thread */
    struct rt_mutex rw_lock;                /* serializes read/write/erase on this chip */
    rt_ubase_t sect[CFI_FLASH_SECT_MAX];    /* mapped (ioremap'd) base address of each sector */
    rt_ubase_t protect[CFI_FLASH_SECT_MAX]; /* non-zero: sector is skipped by erase */
    rt_size_t sect_count;                   /* number of valid entries in sect[] */
    rt_uint8_t portwidth;                   /* bus port width in bytes (FLASH_CFI_8BIT..FLASH_CFI_64BIT) */
    rt_uint8_t chipwidth;                   /* individual chip data width (FLASH_CFI_BY8..) */
    rt_uint8_t chip_lsb;                    /* extra address shift for 8-bit chips on a wider bus */
    rt_uint8_t cmd_reset;                   /* vendor reset opcode (Intel FLASH_CMD_RESET or AMD_CMD_RESET) */
    rt_uint8_t cmd_erase_sector;            /* sector-erase opcode (overridden for some SST parts) */
    rt_uint8_t sr_supported;                /* AMD: status-register read supported (from ext query feature bit) */
    rt_uint16_t ext_addr;                   /* offset of extended (primary vendor) query table, 0 if absent */
    rt_uint16_t version;                    /* CFI version as two ASCII digits, e.g. 0x3131 == "1.1" */
    rt_uint16_t offset;                     /* CFI query command offset that the chip answered to */
    rt_uint16_t vendor;                     /* primary command-set id (CFI_CMDSET_*) */
    rt_uint16_t device_id;                  /* JEDEC device id */
    rt_uint16_t device_ext_id;              /* expanded device id (when low byte of device_id is 0x7e) */
    rt_uint16_t manufacturer_id;            /* JEDEC manufacturer id (bank continuation resolved) */
    rt_uint16_t interface;                  /* CFI device-interface code (FLASH_CFI_X8, X8X16, ...) */
    rt_ubase_t addr_unlock1;                /* first unlock-cycle address (0x555, or 0xaaa in compat mode) */
    rt_ubase_t addr_unlock2;                /* second unlock-cycle address (0x2aa, or 0x555 in compat mode) */
    rt_ubase_t write_tout;                  /* word-program timeout; appears to be microseconds (it is
                                             * divided by 1000 before tick conversion) -- TODO confirm */
    rt_ubase_t erase_blk_tout;              /* block-erase timeout, same unit as write_tout */
    rt_size_t size;                         /* total flash size in bytes */
};
/* Recover the cfi_flash_device from its embedded MTD device pointer. */
#define raw_to_cfi_flash_device(raw) rt_container_of(raw, struct cfi_flash_device, parent)
/* One controller instance: `count` chips in the trailing flexible array. */
struct cfi_flash
{
    int count;
    struct cfi_flash_device dev[];
};
/* Unaligned load/store helpers: access goes through a packed one-member
 * struct so the compiler emits code that is legal at any alignment. */
#define __get_unaligned_t(type, ptr) \
({ \
    const rt_packed(struct { type x; }) *_ptr = (typeof(_ptr))(ptr); \
    _ptr->x; \
})
#define __put_unaligned_t(type, val, ptr) \
do { \
    rt_packed(struct { type x; }) *_ptr = (typeof(_ptr))(ptr); \
    _ptr->x = (val); \
} while (0)
#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
/* CFI query command offsets to try, in probing order: standard, then alternate. */
static rt_uint32_t cfi_flash_offset[2] = { FLASH_OFFSET_CFI, FLASH_OFFSET_CFI_ALT };
/*
 * Translate a (sector, word-offset) pair into a mapped MMIO address.
 * The word offset is scaled by the port width; chip_lsb shifts the byte
 * offset once more for 8-bit chips wired to a wider bus.
 */
rt_inline void *cfi_flash_map(struct cfi_flash_device *fdev, rt_off_t sect, rt_off_t offset)
{
    rt_off_t byte_offset = offset * fdev->portwidth;
    return (void *)(fdev->sect[sect] + (byte_offset << fdev->chip_lsb));
}
/* Raw MMIO accessors, one per supported port width (HWREG* are volatile). */
rt_inline void cfi_flash_write8(void *addr, rt_uint8_t value)
{
    HWREG8(addr) = value;
}
rt_inline void cfi_flash_write16(void *addr, rt_uint16_t value)
{
    HWREG16(addr) = value;
}
rt_inline void cfi_flash_write32(void *addr, rt_uint32_t value)
{
    HWREG32(addr) = value;
}
rt_inline void cfi_flash_write64(void *addr, rt_uint64_t value)
{
    HWREG64(addr) = value;
}
rt_inline rt_uint8_t cfi_flash_read8(void *addr)
{
    return HWREG8(addr);
}
rt_inline rt_uint16_t cfi_flash_read16(void *addr)
{
    return HWREG16(addr);
}
rt_inline rt_uint32_t cfi_flash_read32(void *addr)
{
    return HWREG32(addr);
}
rt_inline rt_uint64_t cfi_flash_read64(void *addr)
{
    return HWREG64(addr);
}
/*
 * Read one byte of a CFI/JEDEC answer from chip 0.
 * On big-endian CPUs the significant byte sits in the highest-addressed
 * lane of the port-wide word, hence the `+ portwidth - 1`.
 */
rt_inline rt_uint8_t cfi_flash_read_byte(struct cfi_flash_device *fdev, rt_off_t offset)
{
    unsigned char *cp, value;
    cp = cfi_flash_map(fdev, 0, offset);
#ifndef ARCH_CPU_BIG_ENDIAN
    value = cfi_flash_read8(cp);
#else
    value = cfi_flash_read8(cp + fdev->portwidth - 1);
#endif
    return value;
}
/* Read a 16-bit value (e.g. a wide device id) from chip 0. */
rt_inline rt_uint16_t cfi_flash_read_word(struct cfi_flash_device *fdev, rt_off_t offset)
{
    rt_uint16_t *addr, value;
    addr = cfi_flash_map(fdev, 0, offset);
    value = cfi_flash_read16(addr);
    return value;
}
/*
 * Replicate a command byte across the bus lanes so that every chip on a
 * multi-chip port receives the opcode. `cmdbuf` receives `portwidth`
 * bytes (callers pass a union cfi_word). The lane layout depends on CPU
 * endianness; lanes beyond the 32-bit command value are zero-filled.
 */
static void cfi_flash_make_cmd(struct cfi_flash_device *fdev, rt_uint32_t cmd, void *cmdbuf)
{
    int cword_offset, cp_offset;
    rt_uint8_t val, *cp = (rt_uint8_t *)cmdbuf;
#ifndef ARCH_CPU_BIG_ENDIAN
    cmd = rt_cpu_to_le32(cmd);
#endif
    for (int i = fdev->portwidth; i > 0; --i)
    {
        /* Byte index within one chip's word: repeats every chipwidth lanes */
        cword_offset = (fdev->portwidth - i) % fdev->chipwidth;
#ifndef ARCH_CPU_BIG_ENDIAN
        cp_offset = fdev->portwidth - i;
        val = *((rt_uint8_t *)&cmd + cword_offset);
#else
        cp_offset = i - 1;
        val = *((rt_uint8_t *)&cmd + sizeof(rt_uint32_t) - cword_offset - 1);
#endif
        /* Zero-fill lanes past the 32-bit command value */
        cp[cp_offset] = (cword_offset >= sizeof(rt_uint32_t)) ? 0x00 : val;
    }
}
/*
 * Issue one command bus-cycle: build the port-wide command image and
 * write it with a single access of the correct width, then barrier so
 * subsequent cycles cannot be reordered past it.
 */
static void cfi_flash_write_cmd(struct cfi_flash_device *fdev,
        rt_off_t sect, rt_off_t offset, rt_uint32_t cmd)
{
    void *addr;
    union cfi_word word;
    addr = cfi_flash_map(fdev, sect, offset);
    cfi_flash_make_cmd(fdev, cmd, &word);
    switch (fdev->portwidth)
    {
    case FLASH_CFI_8BIT:
        cfi_flash_write8(addr, word.w8);
        break;
    case FLASH_CFI_16BIT:
        cfi_flash_write16(addr, word.w16);
        break;
    case FLASH_CFI_32BIT:
        cfi_flash_write32(addr, word.w32);
        break;
    case FLASH_CFI_64BIT:
        cfi_flash_write64(addr, word.w64);
        break;
    }
    /* Ensure the command cycle completes before the next bus access */
    rt_hw_isb();
}
/* AMD-style two-cycle unlock sequence (0xAA @ unlock1, 0x55 @ unlock2). */
static void cfi_flash_unlock_seq(struct cfi_flash_device *fdev, rt_off_t sect)
{
    cfi_flash_write_cmd(fdev, sect, fdev->addr_unlock1, AMD_CMD_UNLOCK_START);
    cfi_flash_write_cmd(fdev, sect, fdev->addr_unlock2, AMD_CMD_UNLOCK_ACK);
}
  167. static rt_off_t cfi_flash_find(struct cfi_flash_device *fdev, rt_ubase_t addr)
  168. {
  169. rt_off_t sect = 0;
  170. while (sect < fdev->sect_count - 1 && fdev->sect[sect] < addr)
  171. {
  172. sect++;
  173. }
  174. while (fdev->sect[sect] > addr && sect > 0)
  175. {
  176. sect--;
  177. }
  178. return sect;
  179. }
/*
 * Compare the port-wide value at (sect, offset) against `cmd` replicated
 * for the current geometry. Used to match CFI query responses ('Q','R','Y').
 */
static rt_bool_t cfi_flash_isequal(struct cfi_flash_device *fdev,
        rt_off_t sect, rt_off_t offset, rt_uint32_t cmd)
{
    void *addr;
    union cfi_word word;
    addr = cfi_flash_map(fdev, sect, offset);
    cfi_flash_make_cmd(fdev, cmd, &word);
    switch (fdev->portwidth)
    {
    case FLASH_CFI_8BIT:
        return cfi_flash_read8(addr) == word.w8;
    case FLASH_CFI_16BIT:
        return cfi_flash_read16(addr) == word.w16;
    case FLASH_CFI_32BIT:
        return cfi_flash_read32(addr) == word.w32;
    case FLASH_CFI_64BIT:
        return cfi_flash_read64(addr) == word.w64;
    default:
        break;
    }
    return RT_FALSE;
}
/*
 * Shift one data byte into a port-wide accumulator (a software shift
 * register used to assemble write data byte by byte). On little-endian
 * CPUs new bytes enter from the top; on big-endian from the bottom, so
 * that after `portwidth` calls the word has bus byte order.
 */
static void cfi_flash_add_byte(struct cfi_flash_device *fdev,
        union cfi_word *word, rt_uint8_t c)
{
#ifndef ARCH_CPU_BIG_ENDIAN
    rt_uint16_t w;
    rt_uint32_t l;
    rt_uint64_t ll;
#endif
    switch (fdev->portwidth)
    {
    case FLASH_CFI_8BIT:
        word->w8 = c;
        break;
    case FLASH_CFI_16BIT:
#ifndef ARCH_CPU_BIG_ENDIAN
        w = c;
        w <<= 8;
        word->w16 = (word->w16 >> 8) | w;
#else
        word->w16 = (word->w16 << 8) | c;
#endif
        break;
    case FLASH_CFI_32BIT:
#ifndef ARCH_CPU_BIG_ENDIAN
        l = c;
        l <<= 24;
        word->w32 = (word->w32 >> 8) | l;
#else
        word->w32 = (word->w32 << 8) | c;
#endif
        break;
    case FLASH_CFI_64BIT:
#ifndef ARCH_CPU_BIG_ENDIAN
        ll = c;
        ll <<= 56;
        word->w64 = (word->w64 >> 8) | ll;
#else
        word->w64 = (word->w64 << 8) | c;
#endif
        break;
    }
}
/*
 * Test whether every bit of `cmd` (replicated per lane) is set in the
 * value read at (sect, offset) — used for status-register polling.
 */
static rt_bool_t cfi_flash_isset(struct cfi_flash_device *fdev,
        rt_off_t sect, rt_off_t offset, rt_uint8_t cmd)
{
    rt_bool_t res;
    rt_uint8_t *addr;
    union cfi_word word;
    addr = cfi_flash_map(fdev, sect, offset);
    cfi_flash_make_cmd(fdev, cmd, &word);
    switch (fdev->portwidth)
    {
    case FLASH_CFI_8BIT:
        res = (cfi_flash_read8(addr) & word.w8) == word.w8;
        break;
    case FLASH_CFI_16BIT:
        res = (cfi_flash_read16(addr) & word.w16) == word.w16;
        break;
    case FLASH_CFI_32BIT:
        res = (cfi_flash_read32(addr) & word.w32) == word.w32;
        break;
    case FLASH_CFI_64BIT:
        res = (cfi_flash_read64(addr) & word.w64) == word.w64;
        break;
    default:
        res = RT_FALSE;
        break;
    }
    return res;
}
/*
 * AMD toggle-bit busy test: while an embedded operation is running the
 * device toggles a status bit on successive reads, so two back-to-back
 * reads differ. The two reads of each case MUST remain distinct volatile
 * accesses. NOTE(review): `word` is built but not used in the compare;
 * this mirrors the upstream U-Boot implementation.
 */
static rt_bool_t cfi_flash_toggle(struct cfi_flash_device *fdev,
        rt_off_t sect, rt_off_t offset, rt_uint8_t cmd)
{
    rt_bool_t res;
    rt_uint8_t *addr;
    union cfi_word word;
    addr = cfi_flash_map(fdev, sect, offset);
    cfi_flash_make_cmd(fdev, cmd, &word);
    switch (fdev->portwidth)
    {
    case FLASH_CFI_8BIT:
        res = cfi_flash_read8(addr) != cfi_flash_read8(addr);
        break;
    case FLASH_CFI_16BIT:
        res = cfi_flash_read16(addr) != cfi_flash_read16(addr);
        break;
    case FLASH_CFI_32BIT:
        res = cfi_flash_read32(addr) != cfi_flash_read32(addr);
        break;
    case FLASH_CFI_64BIT:
        /* 64-bit port: test both 32-bit halves with paired reads */
        res = cfi_flash_read32(addr) != cfi_flash_read32(addr) ||
              cfi_flash_read32(addr + 4) != cfi_flash_read32(addr + 4);
        break;
    default:
        res = RT_FALSE;
        break;
    }
    return res;
}
/*
 * Vendor-specific "operation in progress" test for sector `sect`.
 * Intel: poll the DONE bit of the status register.
 * AMD: use the status register when the part supports it, otherwise
 * fall back to the toggle-bit method.
 */
static rt_bool_t cfi_flash_is_busy(struct cfi_flash_device *fdev, rt_off_t sect)
{
    switch (fdev->vendor)
    {
    case CFI_CMDSET_INTEL_PROG_REGIONS:
    case CFI_CMDSET_INTEL_STANDARD:
    case CFI_CMDSET_INTEL_EXTENDED:
        return !cfi_flash_isset(fdev, sect, 0, FLASH_STATUS_DONE);
    case CFI_CMDSET_AMD_STANDARD:
    case CFI_CMDSET_AMD_EXTENDED:
        if (fdev->sr_supported)
        {
            /* Enter status-register read mode, then check DONE */
            cfi_flash_write_cmd(fdev, sect, fdev->addr_unlock1, FLASH_CMD_READ_STATUS);
            return !cfi_flash_isset(fdev, sect, 0, FLASH_STATUS_DONE);
        }
        else
        {
            return cfi_flash_toggle(fdev, sect, 0, AMD_STATUS_TOGGLE);
        }
    default:
        break;
    }
    return RT_FALSE;
}
/*
 * Busy-wait until the operation on `sect` finishes or `tout` expires.
 * `tout` appears to be in microseconds (divided by 1000 to get ms for
 * the tick conversion, minimum 1 ms) — TODO confirm the unit.
 * On timeout the chip is reset to read-array mode and -RT_ETIMEOUT is
 * returned.
 */
static rt_err_t cfi_flash_status_check(struct cfi_flash_device *fdev,
        rt_off_t sect, rt_ubase_t tout)
{
    rt_tick_t tick;
    tick = rt_tick_from_millisecond(rt_max_t(rt_ubase_t, tout / 1000, 1));
    tick += rt_tick_get();
    while (cfi_flash_is_busy(fdev, sect))
    {
        if (rt_tick_get() > tick)
        {
            /* Give up: force the chip back to read-array mode */
            cfi_flash_write_cmd(fdev, sect, 0, fdev->cmd_reset);
            rt_hw_us_delay(1);
            return -RT_ETIMEOUT;
        }
        rt_hw_us_delay(1);
    }
    return RT_EOK;
}
/*
 * Wait for completion, then (Intel parts only) issue a reset so the
 * device returns to read-array mode regardless of the outcome.
 */
static rt_err_t cfi_flash_full_status_check(struct cfi_flash_device *fdev,
        rt_off_t sect, rt_ubase_t tout)
{
    rt_err_t err;
    err = cfi_flash_status_check(fdev, sect, tout);
    switch (fdev->vendor)
    {
    case CFI_CMDSET_INTEL_PROG_REGIONS:
    case CFI_CMDSET_INTEL_EXTENDED:
    case CFI_CMDSET_INTEL_STANDARD:
        cfi_flash_write_cmd(fdev, sect, 0, fdev->cmd_reset);
        rt_hw_us_delay(1);
        break;
    default:
        break;
    }
    return err;
}
/*
 * Program one port-wide word at mapped address `dest`.
 * NOR flash can only clear bits, so first verify the target still has
 * every bit of `word` set (i.e. is sufficiently erased); then issue the
 * vendor program sequence, write the data, and poll for completion.
 * Returns RT_EOK, -RT_EIO if not erased, or a poll error/timeout.
 */
static rt_err_t cfi_flash_write_word(struct cfi_flash_device *fdev,
        rt_ubase_t dest, union cfi_word word)
{
    int flag;
    void *dstaddr;
    rt_off_t sect = 0;
    rt_bool_t sect_found = RT_FALSE;
    dstaddr = (void *)dest;
    /* Check if Flash is (sufficiently) erased */
    switch (fdev->portwidth)
    {
    case FLASH_CFI_8BIT:
        flag = (cfi_flash_read8(dstaddr) & word.w8) == word.w8;
        break;
    case FLASH_CFI_16BIT:
        flag = (cfi_flash_read16(dstaddr) & word.w16) == word.w16;
        break;
    case FLASH_CFI_32BIT:
        flag = (cfi_flash_read32(dstaddr) & word.w32) == word.w32;
        break;
    case FLASH_CFI_64BIT:
        flag = (cfi_flash_read64(dstaddr) & word.w64) == word.w64;
        break;
    default:
        flag = 0;
        break;
    }
    if (!flag)
    {
        return -RT_EIO;
    }
    /* Start the vendor-specific program sequence */
    switch (fdev->vendor)
    {
    case CFI_CMDSET_INTEL_PROG_REGIONS:
    case CFI_CMDSET_INTEL_EXTENDED:
    case CFI_CMDSET_INTEL_STANDARD:
        cfi_flash_write_cmd(fdev, 0, 0, FLASH_CMD_CLEAR_STATUS);
        cfi_flash_write_cmd(fdev, 0, 0, FLASH_CMD_WRITE);
        break;
    case CFI_CMDSET_AMD_EXTENDED:
    case CFI_CMDSET_AMD_STANDARD:
        /* AMD needs the sector for the unlock cycles; remember it */
        sect = cfi_flash_find(fdev, dest);
        cfi_flash_unlock_seq(fdev, sect);
        cfi_flash_write_cmd(fdev, sect, fdev->addr_unlock1, AMD_CMD_WRITE);
        sect_found = RT_TRUE;
        break;
    }
    /* The data cycle itself */
    switch (fdev->portwidth)
    {
    case FLASH_CFI_8BIT:
        cfi_flash_write8(dstaddr, word.w8);
        break;
    case FLASH_CFI_16BIT:
        cfi_flash_write16(dstaddr, word.w16);
        break;
    case FLASH_CFI_32BIT:
        cfi_flash_write32(dstaddr, word.w32);
        break;
    case FLASH_CFI_64BIT:
        cfi_flash_write64(dstaddr, word.w64);
        break;
    }
    if (!sect_found)
    {
        sect = cfi_flash_find(fdev, dest);
    }
    return cfi_flash_full_status_check(fdev, sect, fdev->write_tout);
}
/*
 * MTD read_id op: returns the JEDEC device id captured during probing.
 * The (non-negative) id is carried through the rt_err_t return slot,
 * as the rt_mtd_nor_driver_ops interface dictates.
 */
static rt_err_t cfi_flash_read_id(struct rt_mtd_nor_device *dev)
{
    struct cfi_flash_device *fdev = raw_to_cfi_flash_device(dev);
    return fdev->device_id;
}
  434. static rt_ssize_t cfi_flash_read(struct rt_mtd_nor_device *dev, rt_off_t offset, rt_uint8_t *data, rt_size_t length)
  435. {
  436. struct cfi_flash_device *fdev = raw_to_cfi_flash_device(dev);
  437. rt_mutex_take(&fdev->rw_lock, RT_WAITING_FOREVER);
  438. rt_memcpy(data, (void *)fdev->sect[0] + offset, length);
  439. rt_mutex_release(&fdev->rw_lock);
  440. return length;
  441. }
/*
 * MTD write op: program `length` bytes at device offset `offset`.
 * The flash can only be programmed one port-wide word at a time, so the
 * write is split into three phases:
 *   1. unaligned head: merge existing flash bytes with the leading data
 *      bytes into one word;
 *   2. aligned body: program whole words straight from `data`;
 *   3. unaligned tail: merge trailing data bytes with existing flash.
 * Returns the number of bytes written, or the first error from
 * cfi_flash_write_word(). Serialized by rw_lock.
 */
static rt_ssize_t cfi_flash_write(struct rt_mtd_nor_device *dev, rt_off_t offset,
        const rt_uint8_t *data, rt_size_t length)
{
    int i;
    rt_ubase_t wp;
    rt_uint8_t *ptr;
    rt_ssize_t res;
    rt_size_t size, written = 0, tail_written = 0;
    union cfi_word word;
    struct cfi_flash_device *fdev = raw_to_cfi_flash_device(dev);
    rt_mutex_take(&fdev->rw_lock, RT_WAITING_FOREVER);
    /* Work on absolute mapped addresses from here on */
    offset += fdev->sect[0];
    /* wp = write position rounded down to a port-width boundary */
    wp = offset & ~(fdev->portwidth - 1);
    size = offset - wp;    /* number of leading bytes to preserve */
    if (size)
    {
        rt_size_t head_written = 0;
        word.w32 = 0;
        ptr = (rt_uint8_t *)wp;
        /* Keep the existing flash bytes before the requested offset */
        for (i = 0; i < size; ++i)
        {
            cfi_flash_add_byte(fdev, &word, cfi_flash_read8(ptr + i));
        }
        /* Fill with caller data as long as any remains */
        for (; i < fdev->portwidth && length > 0; ++i)
        {
            cfi_flash_add_byte(fdev, &word, *data++);
            --length;
            ++head_written;
        }
        /* Data exhausted mid-word: pad with existing flash content */
        for (; !length && i < fdev->portwidth; ++i)
        {
            cfi_flash_add_byte(fdev, &word, cfi_flash_read8(ptr + i));
        }
        if ((res = cfi_flash_write_word(fdev, wp, word)))
        {
            goto _out_lock;
        }
        written += head_written;
        wp += i;
    }
    /* Handle the aligned part */
    while (length >= fdev->portwidth)
    {
        word.w32 = 0;
        for (i = 0; i < fdev->portwidth; ++i)
        {
            cfi_flash_add_byte(fdev, &word, *data++);
        }
        if ((res = cfi_flash_write_word(fdev, wp, word)))
        {
            goto _out_lock;
        }
        wp += fdev->portwidth;
        length -= fdev->portwidth;
        written += fdev->portwidth;    /* accumulate bytes written */
    }
    if (!length)
    {
        res = written;
        goto _out_lock;
    }
    /* Handle unaligned tail bytes: remaining data, then existing flash */
    word.w32 = 0;
    ptr = (rt_uint8_t *)wp;
    for (i = 0; i < fdev->portwidth && length > 0; ++i)
    {
        cfi_flash_add_byte(fdev, &word, *data++);
        --length;
        ++tail_written;
    }
    for (; i < fdev->portwidth; ++i)
    {
        cfi_flash_add_byte(fdev, &word, cfi_flash_read8(ptr + i));
    }
    if ((res = cfi_flash_write_word(fdev, wp, word)))
    {
        goto _out_lock;
    }
    written += tail_written;
    res = written;
_out_lock:
    rt_mutex_release(&fdev->rw_lock);
    return res;
}
  526. static rt_err_t cfi_flash_erase_block(struct rt_mtd_nor_device *dev,
  527. rt_off_t offset, rt_size_t length)
  528. {
  529. rt_err_t err = RT_EOK;
  530. rt_ubase_t sect, sect_end;
  531. struct cfi_flash_device *fdev = raw_to_cfi_flash_device(dev);
  532. rt_mutex_take(&fdev->rw_lock, RT_WAITING_FOREVER);
  533. sect = cfi_flash_find(fdev, fdev->sect[0] + offset);
  534. sect_end = cfi_flash_find(fdev, fdev->sect[0] + offset + length);
  535. for (; sect <= sect_end; ++sect)
  536. {
  537. if (fdev->protect[sect])
  538. {
  539. continue;
  540. }
  541. switch (fdev->vendor)
  542. {
  543. case CFI_CMDSET_INTEL_PROG_REGIONS:
  544. case CFI_CMDSET_INTEL_STANDARD:
  545. case CFI_CMDSET_INTEL_EXTENDED:
  546. cfi_flash_write_cmd(fdev, sect, 0, FLASH_CMD_CLEAR_STATUS);
  547. cfi_flash_write_cmd(fdev, sect, 0, FLASH_CMD_BLOCK_ERASE);
  548. cfi_flash_write_cmd(fdev, sect, 0, FLASH_CMD_ERASE_CONFIRM);
  549. break;
  550. case CFI_CMDSET_AMD_STANDARD:
  551. case CFI_CMDSET_AMD_EXTENDED:
  552. cfi_flash_unlock_seq(fdev, sect);
  553. cfi_flash_write_cmd(fdev, sect, fdev->addr_unlock1, AMD_CMD_ERASE_START);
  554. cfi_flash_unlock_seq(fdev, sect);
  555. cfi_flash_write_cmd(fdev, sect, 0, fdev->cmd_erase_sector);
  556. break;
  557. default:
  558. break;
  559. }
  560. err = cfi_flash_full_status_check(fdev, sect, fdev->erase_blk_tout);
  561. }
  562. rt_mutex_release(&fdev->rw_lock);
  563. return err;
  564. }
  565. const static struct rt_mtd_nor_driver_ops cfi_flash_ops =
  566. {
  567. .read_id = cfi_flash_read_id,
  568. .read = cfi_flash_read,
  569. .write = cfi_flash_write,
  570. .erase_block = cfi_flash_erase_block,
  571. };
/*
 * Try to put the chip in CFI query mode with the current
 * portwidth/chipwidth/chip_lsb guess and, if it answers "QRY", read the
 * whole query structure and derive interface width and unlock addresses.
 * Returns RT_TRUE when the chip answered, RT_FALSE otherwise.
 */
static rt_bool_t cfi_flash_detect_info(struct cfi_flash_device *fdev, struct cfi_query *query)
{
    rt_uint8_t *buffer;
    /* Reset, unknown reset's cmd now, so try Intel and AMD both. */
    cfi_flash_write_cmd(fdev, 0, 0, AMD_CMD_RESET);
    rt_hw_us_delay(1);
    cfi_flash_write_cmd(fdev, 0, 0, FLASH_CMD_RESET);
    for (int i = 0; i < RT_ARRAY_SIZE(cfi_flash_offset); ++i)
    {
        /* Enter CFI query mode at this candidate command offset */
        cfi_flash_write_cmd(fdev, 0, cfi_flash_offset[i], FLASH_CMD_CFI);
        if (cfi_flash_isequal(fdev, 0, FLASH_OFFSET_CFI_RESP, 'Q') &&
            cfi_flash_isequal(fdev, 0, FLASH_OFFSET_CFI_RESP + 1, 'R') &&
            cfi_flash_isequal(fdev, 0, FLASH_OFFSET_CFI_RESP + 2, 'Y'))
        {
            /* Chip answered: copy the query table byte by byte */
            buffer = (void *)query;
            for (int byte = 0; byte < sizeof(*query); ++byte)
            {
                buffer[byte] = cfi_flash_read_byte(fdev, FLASH_OFFSET_CFI_RESP + byte);
            }
            fdev->interface = rt_le16_to_cpu(query->interface_desc);
            /*
             * Some flash chips can support multiple bus widths.
             * In this case, override the interface width and
             * limit it to the port width.
             */
            if (fdev->interface == FLASH_CFI_X8X16 &&
                fdev->portwidth == FLASH_CFI_8BIT)
            {
                fdev->interface = FLASH_CFI_X8;
            }
            else if (fdev->interface == FLASH_CFI_X16X32 &&
                     fdev->portwidth == FLASH_CFI_16BIT)
            {
                fdev->interface = FLASH_CFI_X16;
            }
            fdev->offset = cfi_flash_offset[i];
            fdev->addr_unlock1 = 0x555;
            fdev->addr_unlock2 = 0x2aa;
            /* modify the unlock address if we are in compatibility mode */
            if ((fdev->chipwidth == FLASH_CFI_BY8 &&  /* x8/x16 in x8 mode */
                 fdev->interface == FLASH_CFI_X8X16) ||
                (fdev->chipwidth == FLASH_CFI_BY16 && /* x16/x32 in x16 mode */
                 fdev->interface == FLASH_CFI_X16X32))
            {
                fdev->addr_unlock1 = 0xaaa;
                fdev->addr_unlock2 = 0x555;
            }
            return RT_TRUE;
        }
    }
    return RT_FALSE;
}
  624. static rt_bool_t cfi_flash_detect(struct cfi_flash_device *fdev, struct cfi_query *query)
  625. {
  626. for (fdev->portwidth = FLASH_CFI_8BIT;
  627. fdev->portwidth <= FLASH_CFI_64BIT;
  628. fdev->portwidth <<= 1)
  629. {
  630. for (fdev->chipwidth = FLASH_CFI_BY8;
  631. fdev->chipwidth <= fdev->portwidth;
  632. fdev->chipwidth <<= 1)
  633. {
  634. /*
  635. * First, try detection without shifting the addresses
  636. * for 8bit devices (16bit wide connection)
  637. */
  638. fdev->chip_lsb = 0;
  639. if (cfi_flash_detect_info(fdev, query))
  640. {
  641. return RT_TRUE;
  642. }
  643. /* Not detected, so let's try with shifting for 8bit devices */
  644. fdev->chip_lsb = 1;
  645. if (cfi_flash_detect_info(fdev, query))
  646. {
  647. return RT_TRUE;
  648. }
  649. }
  650. }
  651. return RT_FALSE;
  652. }
/*
 * Intel command-set setup: record the reset opcode and read the JEDEC
 * manufacturer/device ids, then return the chip to CFI query mode
 * (the caller still needs query data).
 */
static void cfi_cmdset_intel_init(struct cfi_flash_device *fdev, struct cfi_query *query)
{
    fdev->cmd_reset = FLASH_CMD_RESET;
    /* Intel read jedec IDs */
    cfi_flash_write_cmd(fdev, 0, 0, FLASH_CMD_RESET);
    rt_hw_us_delay(1);
    cfi_flash_write_cmd(fdev, 0, 0, FLASH_CMD_READ_ID);
    rt_hw_us_delay(1000);
    fdev->manufacturer_id = cfi_flash_read_byte(fdev, FLASH_OFFSET_MANUFACTURER_ID);
    /* 16-bit chips expose a 16-bit device id, 8-bit chips a single byte */
    fdev->device_id = (fdev->chipwidth == FLASH_CFI_16BIT) ?
                      cfi_flash_read_word(fdev, FLASH_OFFSET_DEVICE_ID) :
                      cfi_flash_read_byte(fdev, FLASH_OFFSET_DEVICE_ID);
    cfi_flash_write_cmd(fdev, 0, 0, FLASH_CMD_RESET);
    /* Read end: re-enter CFI query mode */
    cfi_flash_write_cmd(fdev, 0, fdev->offset, FLASH_CMD_CFI);
}
/*
 * AMD command-set setup: record reset/erase opcodes, read the JEDEC
 * manufacturer id (following JEP106 continuation codes across banks),
 * check the optional status-register feature, read the (possibly
 * 3-byte expanded) device id, then return to CFI query mode.
 */
static void cfi_cmdset_amd_init(struct cfi_flash_device *fdev, struct cfi_query *query)
{
    rt_uint16_t bank_id = 0;
    rt_uint8_t manu_id, feature;
    fdev->cmd_reset = AMD_CMD_RESET;
    fdev->cmd_erase_sector = AMD_CMD_ERASE_SECTOR;
    /* AMD read jedec IDs */
    cfi_flash_write_cmd(fdev, 0, 0, AMD_CMD_RESET);
    cfi_flash_unlock_seq(fdev, 0);
    cfi_flash_write_cmd(fdev, 0, fdev->addr_unlock1, FLASH_CMD_READ_ID);
    rt_hw_us_delay(1000);
    manu_id = cfi_flash_read_byte(fdev, FLASH_OFFSET_MANUFACTURER_ID);
    /* JEDEC JEP106Z specifies ID codes up to bank 7 */
    while (manu_id == FLASH_CONTINUATION_CODE && bank_id < 0x800)
    {
        bank_id += 0x100;
        manu_id = cfi_flash_read_byte(fdev, bank_id | FLASH_OFFSET_MANUFACTURER_ID);
    }
    fdev->manufacturer_id = manu_id;
    if (fdev->ext_addr && fdev->version >= 0x3134)
    {
        /* CFI >= 1.4: software-features byte at ext query offset 0x13
         * (absolute 0x53 for the usual 0x40 table base); bit 0 means
         * the status register is readable. */
        feature = cfi_flash_read_byte(fdev, fdev->ext_addr + 0x13);
        fdev->sr_supported = feature & 0x1;
    }
    switch (fdev->chipwidth)
    {
    case FLASH_CFI_8BIT:
        fdev->device_id = cfi_flash_read_byte(fdev, FLASH_OFFSET_DEVICE_ID);
        if (fdev->device_id == 0x7e)
        {
            /* AMD 3-byte (expanded) device ids */
            fdev->device_ext_id = cfi_flash_read_byte(fdev, FLASH_OFFSET_DEVICE_ID2);
            fdev->device_ext_id <<= 8;
            fdev->device_ext_id |= cfi_flash_read_byte(fdev, FLASH_OFFSET_DEVICE_ID3);
        }
        break;
    case FLASH_CFI_16BIT:
        fdev->device_id = cfi_flash_read_word(fdev, FLASH_OFFSET_DEVICE_ID);
        if ((fdev->device_id & 0xff) == 0x7e)
        {
            /* AMD 3-byte (expanded) device ids */
            fdev->device_ext_id = cfi_flash_read_byte(fdev, FLASH_OFFSET_DEVICE_ID2);
            fdev->device_ext_id <<= 8;
            fdev->device_ext_id |= cfi_flash_read_byte(fdev, FLASH_OFFSET_DEVICE_ID3);
        }
        break;
    default:
        break;
    }
    cfi_flash_write_cmd(fdev, 0, 0, AMD_CMD_RESET);
    rt_hw_us_delay(1);
    /* Read end: re-enter CFI query mode */
    cfi_flash_write_cmd(fdev, 0, fdev->offset, FLASH_CMD_CFI);
}
  724. static void cfi_reverse_geometry(struct cfi_query *query)
  725. {
  726. rt_uint32_t info;
  727. for (int i = 0, j = query->num_erase_regions - 1; i < j; ++i, --j)
  728. {
  729. info = get_unaligned(&query->erase_region_info[i]);
  730. put_unaligned(get_unaligned(&query->erase_region_info[j]),
  731. &query->erase_region_info[i]);
  732. put_unaligned(info, &query->erase_region_info[j]);
  733. }
  734. }
  735. static void cfi_flash_fixup_amd(struct cfi_flash_device *fdev, struct cfi_query *query)
  736. {
  737. /* check if flash geometry needs reversal */
  738. if (query->num_erase_regions > 1)
  739. {
  740. /* reverse geometry if top boot part */
  741. if (fdev->version < 0x3131)
  742. {
  743. /* CFI < 1.1, try to guess from device id */
  744. if ((fdev->device_id & 0x80) != 0)
  745. {
  746. cfi_reverse_geometry(query);
  747. }
  748. }
  749. else if (cfi_flash_read_byte(fdev, fdev->ext_addr + 0xf) == 3)
  750. {
  751. /*
  752. * CFI >= 1.1, deduct from top/bottom flag,
  753. * ext_addr is valid since cfi version > 0.
  754. */
  755. cfi_reverse_geometry(query);
  756. }
  757. }
  758. }
  759. static void cfi_flash_fixup_atmel(struct cfi_flash_device *fdev, struct cfi_query *query)
  760. {
  761. int reverse_geometry = 0;
  762. /* Check the "top boot" bit in the PRI */
  763. if (fdev->ext_addr && !(cfi_flash_read_byte(fdev, fdev->ext_addr + 6) & 1))
  764. {
  765. reverse_geometry = 1;
  766. }
  767. /*
  768. * AT49BV6416(T) list the erase regions in the wrong order.
  769. * However, the device ID is identical with the non-broken
  770. * AT49BV642D they differ in the high byte.
  771. */
  772. if (fdev->device_id == 0xd6 || fdev->device_id == 0xd2)
  773. {
  774. reverse_geometry = !reverse_geometry;
  775. }
  776. if (reverse_geometry)
  777. {
  778. cfi_reverse_geometry(query);
  779. }
  780. }
  781. static void cfi_flash_fixup_stm(struct cfi_flash_device *fdev, struct cfi_query *query)
  782. {
  783. /* Check if flash geometry needs reversal */
  784. if (query->num_erase_regions > 1)
  785. {
  786. /* Reverse geometry if top boot part */
  787. if (fdev->version < 0x3131)
  788. {
  789. /*
  790. * CFI < 1.1, guess by device id:
  791. * M29W320DT, M29W320ET, M29W800DT
  792. */
  793. if (fdev->device_id == 0x22ca ||
  794. fdev->device_id == 0x2256 ||
  795. fdev->device_id == 0x22d7)
  796. {
  797. cfi_reverse_geometry(query);
  798. }
  799. }
  800. else if (cfi_flash_read_byte(fdev, fdev->ext_addr + 0xf) == 3)
  801. {
  802. /*
  803. * CFI >= 1.1, deduct from top/bottom flag,
  804. * ext_addr is valid since cfi version > 0.
  805. */
  806. cfi_reverse_geometry(query);
  807. }
  808. }
  809. }
  810. static void cfi_flash_fixup_sst(struct cfi_flash_device *fdev, struct cfi_query *query)
  811. {
  812. /*
  813. * SST, for many recent nor parallel flashes, says they are
  814. * CFI-conformant. This is not true, since qry struct.
  815. * reports a std. AMD command set (0x0002), while SST allows to
  816. * erase two different sector sizes for the same memory.
  817. * 64KB sector (SST call it block) needs 0x30 to be erased.
  818. * 4KB sector (SST call it sector) needs 0x50 to be erased.
  819. * Since CFI query detect the 4KB number of sectors, users expects
  820. * a sector granularity of 4KB, and it is here set.
  821. */
  822. /* SST39VF3201B, SST39VF3202B */
  823. if (fdev->device_id == 0x5d23 || fdev->device_id == 0x5c23)
  824. {
  825. /* Set sector granularity to 4KB */
  826. fdev->cmd_erase_sector = 0x50;
  827. }
  828. }
  829. static void cfi_flash_fixup_num(struct cfi_flash_device *fdev, struct cfi_query *query)
  830. {
  831. /*
  832. * The M29EW devices seem to report the CFI information wrong
  833. * when it's in 8 bit mode.
  834. * There's an app note from Numonyx on this issue.
  835. * So adjust the buffer size for M29EW while operating in 8-bit mode
  836. */
  837. if (query->max_buf_write_size > 0x8 && fdev->device_id == 0x7e &&
  838. (fdev->device_ext_id == 0x2201 || fdev->device_ext_id == 0x2301 ||
  839. fdev->device_ext_id == 0x2801 || fdev->device_ext_id == 0x4801))
  840. {
  841. query->max_buf_write_size = 0x8;
  842. }
  843. }
  844. static rt_err_t cfi_flash_dev_probe(struct rt_device *dev, struct cfi_flash_device *fdev, int index)
  845. {
  846. rt_err_t err;
  847. int size_ratio;
  848. const char *name;
  849. rt_uint64_t addr, size;
  850. rt_ubase_t sect, value;
  851. rt_size_t max_size, sect_count;
  852. int num_erase_regions, erase_region_count, erase_region_size;
  853. struct cfi_query *query = rt_malloc(sizeof(*query));
  854. if (!query)
  855. {
  856. return -RT_ENOMEM;
  857. }
  858. if (rt_dm_dev_get_address(dev, index, &addr, &size) < 0)
  859. {
  860. err = -RT_EIO;
  861. goto _fail;
  862. }
  863. fdev->sect[0] = (rt_ubase_t)rt_ioremap((void *)addr, size);
  864. if (!cfi_flash_detect(fdev, query))
  865. {
  866. err = -RT_EEMPTY;
  867. goto _fail;
  868. }
  869. fdev->vendor = rt_le16_to_cpu(get_unaligned(&query->primary_id));
  870. fdev->ext_addr = rt_le16_to_cpu(get_unaligned(&query->primary_address));
  871. num_erase_regions = query->num_erase_regions;
  872. if (fdev->ext_addr)
  873. {
  874. fdev->version = cfi_flash_read_byte(fdev, fdev->ext_addr + 3) << 8;
  875. fdev->version |= cfi_flash_read_byte(fdev, fdev->ext_addr + 4);
  876. }
  877. switch (fdev->vendor)
  878. {
  879. case CFI_CMDSET_INTEL_PROG_REGIONS:
  880. case CFI_CMDSET_INTEL_STANDARD:
  881. case CFI_CMDSET_INTEL_EXTENDED:
  882. cfi_cmdset_intel_init(fdev, query);
  883. break;
  884. case CFI_CMDSET_AMD_STANDARD:
  885. case CFI_CMDSET_AMD_EXTENDED:
  886. cfi_cmdset_amd_init(fdev, query);
  887. break;
  888. default:
  889. /* Try an Intel-style reset*/
  890. cfi_flash_write_cmd(fdev, 0, 0, FLASH_CMD_RESET);
  891. break;
  892. }
  893. switch (fdev->manufacturer_id)
  894. {
  895. case 0x0001: /* AMD */
  896. case 0x0037: /* AMIC */
  897. cfi_flash_fixup_amd(fdev, query);
  898. break;
  899. case 0x001f:
  900. cfi_flash_fixup_atmel(fdev, query);
  901. break;
  902. case 0x0020:
  903. cfi_flash_fixup_stm(fdev, query);
  904. break;
  905. case 0x00bf: /* SST */
  906. cfi_flash_fixup_sst(fdev, query);
  907. break;
  908. case 0x0089: /* Numonyx */
  909. cfi_flash_fixup_num(fdev, query);
  910. break;
  911. }
  912. size_ratio = fdev->portwidth / fdev->chipwidth;
  913. if (fdev->interface == FLASH_CFI_X8X16 && fdev->chipwidth == FLASH_CFI_BY8)
  914. {
  915. size_ratio >>= 1;
  916. }
  917. fdev->size = 1 << query->dev_size;
  918. /* Multiply the size by the number of chips */
  919. fdev->size *= size_ratio;
  920. max_size = size;
  921. if (max_size && fdev->size > max_size)
  922. {
  923. fdev->size = max_size;
  924. }
  925. sect_count = 0;
  926. sect = fdev->sect[0];
  927. for (int i = 0; i < num_erase_regions; ++i)
  928. {
  929. if (i > RT_ARRAY_SIZE(query->erase_region_info))
  930. {
  931. LOG_E("Too many %d (> %d) erase regions found",
  932. num_erase_regions, RT_ARRAY_SIZE(query->erase_region_info));
  933. break;
  934. }
  935. value = rt_le32_to_cpu(get_unaligned(&query->erase_region_info[i]));
  936. erase_region_count = (value & 0xffff) + 1;
  937. value >>= 16;
  938. erase_region_size = (value & 0xffff) ? ((value & 0xffff) * 256) : 128;
  939. for (int j = 0; j < erase_region_count; ++j)
  940. {
  941. if (sect - fdev->sect[0] >= fdev->size)
  942. {
  943. break;
  944. }
  945. if (sect_count > RT_ARRAY_SIZE(fdev->sect))
  946. {
  947. LOG_E("Too many %d (> %d) sector found",
  948. sect_count, RT_ARRAY_SIZE(fdev->sect));
  949. break;
  950. }
  951. fdev->sect[sect_count] = sect;
  952. sect += (erase_region_size * size_ratio);
  953. switch (fdev->vendor)
  954. {
  955. case CFI_CMDSET_INTEL_PROG_REGIONS:
  956. case CFI_CMDSET_INTEL_EXTENDED:
  957. case CFI_CMDSET_INTEL_STANDARD:
  958. /*
  959. * Set flash to read-id mode. Otherwise
  960. * reading protected status is not guaranteed.
  961. */
  962. cfi_flash_write_cmd(fdev, sect_count, 0, FLASH_CMD_READ_ID);
  963. fdev->protect[sect_count] = cfi_flash_isset(fdev,
  964. sect_count, FLASH_OFFSET_PROTECT, FLASH_STATUS_PROTECT);
  965. cfi_flash_write_cmd(fdev, sect_count, 0, FLASH_CMD_RESET);
  966. break;
  967. case CFI_CMDSET_AMD_EXTENDED:
  968. case CFI_CMDSET_AMD_STANDARD:
  969. default:
  970. /* Default: not protected */
  971. fdev->protect[sect_count] = RT_NULL;
  972. }
  973. ++sect_count;
  974. }
  975. fdev->sect_count = sect_count;
  976. }
  977. if (fdev->interface == FLASH_CFI_X8X16 && fdev->chipwidth == FLASH_CFI_BY8)
  978. {
  979. /* Need to test on x8/x16 in parallel. */
  980. fdev->portwidth >>= 1;
  981. }
  982. value = 1 << query->block_erase_timeout_type;
  983. fdev->erase_blk_tout = value * (1 << query->block_erase_timeout_max);
  984. value = (1 << query->word_write_timeout_type) * (1 << query->word_write_timeout_max);
  985. /* Round up when converting to ms */
  986. fdev->write_tout = (value + 999) / 1000;
  987. cfi_flash_write_cmd(fdev, 0, 0, fdev->cmd_reset);
  988. fdev->parent.ops = &cfi_flash_ops;
  989. fdev->parent.block_start = 0;
  990. fdev->parent.block_end = fdev->sect_count;
  991. fdev->parent.block_size = size / fdev->sect_count;
  992. if ((err = rt_dm_dev_set_name_auto(&fdev->parent.parent, "nor")) < 0)
  993. {
  994. goto _fail;
  995. }
  996. name = rt_dm_dev_get_name(&fdev->parent.parent);
  997. if ((err = rt_mutex_init(&fdev->rw_lock, name, RT_IPC_FLAG_PRIO)))
  998. {
  999. goto _fail;
  1000. }
  1001. if ((err = rt_mtd_nor_register_device(name, &fdev->parent)))
  1002. {
  1003. goto _fail;
  1004. }
  1005. rt_free(query);
  1006. return RT_EOK;
  1007. _fail:
  1008. if (fdev->sect[0])
  1009. {
  1010. rt_iounmap((void *)fdev->sect[0]);
  1011. }
  1012. rt_free(query);
  1013. return err;
  1014. }
  1015. static void cfi_flash_dev_remove(struct rt_device *dev, struct cfi_flash_device *fdev)
  1016. {
  1017. if (fdev->rw_lock.parent.parent.name[0])
  1018. {
  1019. rt_mutex_detach(&fdev->rw_lock);
  1020. }
  1021. if (fdev->sect[0])
  1022. {
  1023. rt_iounmap((void *)fdev->sect[0]);
  1024. }
  1025. }
  1026. static rt_err_t cfi_flash_probe(struct rt_platform_device *pdev)
  1027. {
  1028. rt_err_t err;
  1029. rt_size_t count;
  1030. struct cfi_flash *cfi;
  1031. struct rt_device *dev = &pdev->parent;
  1032. if ((count = rt_dm_dev_get_address_count(dev)) <= 0)
  1033. {
  1034. return -RT_EEMPTY;
  1035. }
  1036. cfi = rt_calloc(1, sizeof(*cfi) + count * sizeof(cfi->dev[0]));
  1037. if (!cfi)
  1038. {
  1039. return -RT_ENOMEM;
  1040. }
  1041. cfi->count = count;
  1042. for (int i = 0; i < cfi->count; ++i)
  1043. {
  1044. if ((err = cfi_flash_dev_probe(dev, &cfi->dev[i], i)))
  1045. {
  1046. goto _fail;
  1047. }
  1048. }
  1049. dev->user_data = cfi;
  1050. return RT_EOK;
  1051. _fail:
  1052. for (int i = 0; i < cfi->count; ++i)
  1053. {
  1054. cfi_flash_dev_remove(dev, &cfi->dev[i]);
  1055. }
  1056. rt_free(cfi);
  1057. return err;
  1058. }
  1059. static rt_err_t cfi_flash_remove(struct rt_platform_device *pdev)
  1060. {
  1061. struct rt_device *dev = &pdev->parent;
  1062. struct cfi_flash *cfi = pdev->parent.user_data;
  1063. for (int i = 0; i < cfi->count; ++i)
  1064. {
  1065. cfi_flash_dev_remove(dev, &cfi->dev[i]);
  1066. }
  1067. rt_free(cfi);
  1068. return RT_EOK;
  1069. }
/* Devicetree match table: nodes this driver binds to */
static const struct rt_ofw_node_id cfi_flash_ofw_ids[] =
{
    { .compatible = "cfi-flash" },
    { .compatible = "jedec-flash" },
    { /* sentinel */ }
};
/* Platform driver descriptor: wires the match table to probe/remove */
static struct rt_platform_driver cfi_flash_driver =
{
    .name = "cfi-flash",
    .ids = cfi_flash_ofw_ids,
    .probe = cfi_flash_probe,
    .remove = cfi_flash_remove,
};
/* Register the driver with the platform bus at init time */
RT_PLATFORM_DRIVER_EXPORT(cfi_flash_driver);