/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-02-25     GuEe-GUI     the first version
 */

#include <rtthread.h>
#include <rtdevice.h>

#define DBG_TAG "rtdm.nvmem"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
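
/**
 * @brief Register an NVMEM device.
 *
 * Sets up the optional "wp" (write-protect) GPIO unless ignore_wp is set,
 * initializes the cell list and reference count, marks the device read-only
 * when the "read-only" property is present or no reg_write callback is
 * provided, and publishes the device on its OFW node.
 *
 * @param ndev the NVMEM device to register
 *
 * @return RT_EOK on success, -RT_EINVAL on invalid argument or
 *         write-protect pin lookup failure
 */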
rt_err_t rt_nvmem_device_register(struct rt_nvmem_device *ndev)
{
    struct rt_ofw_node *np;

    if (!ndev)
    {
        return -RT_EINVAL;
    }

    np = ndev->parent.ofw_node;

    if (!ndev->ignore_wp)
    {
        rt_uint8_t mode;

        ndev->wp_pin = rt_pin_get_named_pin(&ndev->parent, "wp", 0,
                &mode, &ndev->wp_pin_active);

        if (ndev->wp_pin < 0 && ndev->wp_pin != PIN_NONE)
        {
            return -RT_EINVAL;
        }
        else if (ndev->wp_pin >= 0)
        {
            rt_pin_mode(ndev->wp_pin, mode);
        }
    }

    if (!ndev->cells_nr)
    {
        rt_list_init(&ndev->cell_nodes);
    }

    rt_ref_init(&ndev->ref);

    ndev->read_only = rt_dm_dev_prop_read_bool(&ndev->parent, "read-only") ||
            ndev->read_only || !ndev->reg_write;

    if (np)
    {
        rt_ofw_data(np) = ndev;
    }

    return RT_EOK;
}
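
/**
 * @brief Unregister an NVMEM device.
 *
 * @param ndev the NVMEM device to unregister
 *
 * @return RT_EOK on success, -RT_EINVAL on invalid argument,
 *         -RT_EBUSY if the device is still referenced by cells or users
 */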
rt_err_t rt_nvmem_device_unregister(struct rt_nvmem_device *ndev)
{
    if (!ndev)
    {
        return -RT_EINVAL;
    }

    if (rt_ref_read(&ndev->ref) != 1)
    {
        return -RT_EBUSY;
    }

    return RT_EOK;
}
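
/**
 * @brief Append a cell to an NVMEM device.
 *
 * The cell is linked into the device's cell list under the device spinlock,
 * and the device's reference count is increased so it cannot be released
 * while the cell exists.
 *
 * @param ndev the NVMEM device that owns the cell
 * @param cell the cell to append
 *
 * @return RT_EOK on success, -RT_EINVAL on invalid argument
 */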
rt_err_t rt_nvmem_device_append_cell(struct rt_nvmem_device *ndev,
        struct rt_nvmem_cell *cell)
{
    rt_ubase_t level;

    if (!ndev || !cell)
    {
        return -RT_EINVAL;
    }

    if (!ndev->cells_nr)
    {
        rt_list_init(&ndev->cell_nodes);
    }

    rt_list_init(&cell->list);

    level = rt_spin_lock_irqsave(&ndev->spinlock);
    rt_list_insert_before(&ndev->cell_nodes, &cell->list);
    ++ndev->cells_nr;
    rt_spin_unlock_irqrestore(&ndev->spinlock, level);

    rt_ref_get(&ndev->ref);

    return RT_EOK;
}
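
/**
 * @brief Read data from an NVMEM cell.
 *
 * When the cell has a bit offset or bit width ("bits" in the device tree),
 * the raw bytes are shifted down and masked so that only the cell's bits
 * are returned, lsb-aligned.
 *
 * @param cell the cell to read
 * @param buffer where the data is stored
 * @param len byte length to read, at most the cell (and device) size
 *
 * @return the read size on success, or a negative error code
 *
 * A minimal consumer sketch (the cell name "mac-address" and the 6-byte
 * size are illustrative, not part of this API):
 *
 *     struct rt_nvmem_cell *cell;
 *     rt_uint8_t mac[6];
 *
 *     cell = rt_nvmem_get_cell_by_name(dev, "mac-address");
 *     if (!rt_is_err_or_null(cell))
 *     {
 *         rt_nvmem_cell_read(cell, mac, sizeof(mac));
 *         rt_nvmem_put_cell(cell);
 *     }
 */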
rt_ssize_t rt_nvmem_cell_read(struct rt_nvmem_cell *cell, void *buffer,
        rt_size_t len)
{
    rt_ssize_t res;
    struct rt_nvmem_device *nvmem;

    if (!cell || !buffer || !len)
    {
        return -RT_EINVAL;
    }

    nvmem = cell->nvmem;

    if (len > nvmem->size || len > cell->bytes)
    {
        return -RT_EINVAL;
    }

    if (!nvmem->reg_read)
    {
        return -RT_ENOSYS;
    }

    if ((res = nvmem->reg_read(nvmem, cell->offset, buffer, len)) < 0)
    {
        return res;
    }

    if (cell->bit_offset || cell->nbits)
    {
        /* Shift buffer */
        rt_uint8_t *p, *b;
        int extra, bit_offset = cell->bit_offset;

        p = b = buffer;

        if (bit_offset)
        {
            /* First shift */
            *b++ >>= bit_offset;

            /* Setup rest of the bytes if any */
            for (int i = 1; i < cell->bytes; ++i)
            {
                /* Get bits from next byte and shift them towards msb */
                *p |= *b << (RT_BITS_PER_BYTE - bit_offset);

                p = b;
                *b++ >>= bit_offset;
            }
        }
        else
        {
            /* Point to the msb */
            p += cell->bytes - 1;
        }

        /* Result fits in fewer bytes */
        extra = cell->bytes - RT_DIV_ROUND_UP(cell->nbits, RT_BITS_PER_BYTE);
        while (--extra >= 0)
        {
            *p-- = 0;
        }

        /* Clear msb bits if any leftover in the last byte */
        if (cell->nbits % RT_BITS_PER_BYTE)
        {
            *p &= RT_GENMASK((cell->nbits % RT_BITS_PER_BYTE) - 1, 0);
        }
    }

    return res;
}
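
/**
 * @brief Write data to an NVMEM cell.
 *
 * For cells with a bit offset or bit width, a temporary buffer is built by
 * shifting the input up and merging in the surrounding bits read back from
 * the device, so bits outside the cell are preserved. If a write-protect
 * pin is configured, it is deasserted for the duration of the write.
 *
 * @param cell the cell to write
 * @param buffer the data to write
 * @param len byte length to write, at most the cell (and device) size
 *
 * @return the written size on success, or a negative error code
 */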
rt_ssize_t rt_nvmem_cell_write(struct rt_nvmem_cell *cell, void *buffer,
        rt_size_t len)
{
    rt_ssize_t res = 0;
    struct rt_nvmem_device *nvmem;

    if (!cell || !buffer || !len)
    {
        return -RT_EINVAL;
    }

    nvmem = cell->nvmem;

    if (len > nvmem->size || len > cell->bytes)
    {
        return -RT_EINVAL;
    }

    if (!nvmem->reg_write)
    {
        return -RT_ENOSYS;
    }

    if (cell->bit_offset || cell->nbits)
    {
        /* Shift buffer */
        int nbits, bit_offset = cell->bit_offset;
        rt_uint8_t v, *p, *buf, *b, pbyte, pbits;

        nbits = cell->nbits;
        buf = rt_calloc(1, cell->bytes);

        if (!buf)
        {
            return -RT_ENOMEM;
        }

        rt_memcpy(buf, buffer, len);
        p = b = buf;

        if (bit_offset)
        {
            pbyte = *b;
            *b <<= bit_offset;

            /* Setup the first byte with lsb bits from nvmem */
            if ((res = nvmem->reg_read(nvmem, cell->offset, &v, 1)) < 0)
            {
                goto _end;
            }
            *b++ |= RT_GENMASK(bit_offset - 1, 0) & v;

            /* Setup rest of the bytes if any */
            for (int i = 1; i < cell->bytes; ++i)
            {
                /* Get last byte bits and shift them towards lsb */
                pbits = pbyte >> (RT_BITS_PER_BYTE - 1 - bit_offset);
                pbyte = *b;
                p = b;
                *b <<= bit_offset;
                *b++ |= pbits;
            }
        }

        /* If it does not end on a byte boundary */
        if ((nbits + bit_offset) % RT_BITS_PER_BYTE)
        {
            /* Setup the last byte with msb bits from nvmem */
            if ((res = nvmem->reg_read(nvmem,
                    cell->offset + cell->bytes - 1, &v, 1)) < 0)
            {
                goto _end;
            }
            *p |= RT_GENMASK(7, (nbits + bit_offset) % RT_BITS_PER_BYTE) & v;
        }

        buffer = buf;

    _end:
        if (res < 0)
        {
            rt_free(buf);

            return res;
        }
    }

    if (nvmem->wp_pin >= 0)
    {
        rt_pin_write(nvmem->wp_pin, !nvmem->wp_pin_active);
    }

    res = nvmem->reg_write(nvmem, cell->offset, buffer, len);

    if (nvmem->wp_pin >= 0)
    {
        rt_pin_write(nvmem->wp_pin, nvmem->wp_pin_active);
    }

    if (cell->bit_offset || cell->nbits)
    {
        rt_free(buffer);
    }

    return res;
}
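
/*
 * Sized read helpers: thin wrappers around rt_nvmem_cell_read() for common
 * integer widths. No byte-order conversion is performed; the bytes are
 * returned as stored in the device.
 */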
rt_ssize_t rt_nvmem_cell_read_u8(struct rt_nvmem_cell *cell, rt_uint8_t *out_val)
{
    return rt_nvmem_cell_read(cell, out_val, sizeof(*out_val));
}

rt_ssize_t rt_nvmem_cell_read_u16(struct rt_nvmem_cell *cell, rt_uint16_t *out_val)
{
    return rt_nvmem_cell_read(cell, out_val, sizeof(*out_val));
}

rt_ssize_t rt_nvmem_cell_read_u32(struct rt_nvmem_cell *cell, rt_uint32_t *out_val)
{
    return rt_nvmem_cell_read(cell, out_val, sizeof(*out_val));
}

rt_ssize_t rt_nvmem_cell_read_u64(struct rt_nvmem_cell *cell, rt_uint64_t *out_val)
{
    return rt_nvmem_cell_read(cell, out_val, sizeof(*out_val));
}
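
/*
 * Resolve an NVMEM cell referenced by a consumer OFW node. Lookup order:
 *   1. the cell already cached in the cell node's OFW data;
 *   2. the provider's existing cell list (matched by index and, when given,
 *      by id);
 *   3. a new cell parsed from the node's "reg" (byte offset, byte size) and
 *      optional "bits" (bit offset, bit count) properties.
 *
 * A device tree sketch of the expected layout (node and cell names are
 * illustrative):
 *
 *     eeprom: eeprom@50 {
 *         calib: calib@6 {
 *             reg = <0x6 0x2>;   // byte offset 6, 2 bytes
 *             bits = <2 10>;     // starting at bit 2, 10 bits wide
 *         };
 *     };
 *
 *     consumer {
 *         nvmem-cells = <&calib>;
 *         nvmem-cell-names = "calib";
 *     };
 */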
static struct rt_nvmem_cell *ofw_nvmem_get_cell(struct rt_ofw_node *np,
        int index, const char *const_id)
{
    rt_ubase_t level;
    rt_ssize_t length;
    const fdt32_t *addr;
    struct rt_nvmem_device *nvmem;
    struct rt_nvmem_cell *cell, *cell_tmp;
    struct rt_ofw_cell_args cell_args;
    struct rt_ofw_node *cell_np = RT_NULL, *nvmem_np = RT_NULL;

    if (rt_ofw_parse_phandle_cells(np,
            "nvmem-cells", "#nvmem-cell-cells", index, &cell_args))
    {
        return RT_NULL;
    }

    cell_np = cell_args.data;
    index = cell_args.args_count ? cell_args.args[0] : 0;

    if (!cell_np)
    {
        cell = rt_err_ptr(-RT_ERROR);
        goto _put_node;
    }

    /* 1. Find in the OFW node data */
    if (!rt_ofw_data(cell_np))
    {
        rt_platform_ofw_request(cell_np);
    }

    cell = rt_ofw_data(cell_np);

    if (cell && rt_ref_read(&cell->ref) > 0)
    {
        rt_ref_get(&cell->ref);

        goto _put_node;
    }

    cell = rt_err_ptr(-RT_ERROR);
    nvmem_np = rt_ofw_get_parent(cell_np);

    if (!nvmem_np)
    {
        goto _put_node;
    }

    nvmem = rt_ofw_data(nvmem_np);

    if (!nvmem)
    {
        goto _put_node;
    }

    level = rt_spin_lock_irqsave(&nvmem->spinlock);

    /* 2. Find in the device's existing cell list */
    rt_list_for_each_entry(cell_tmp, &nvmem->cell_nodes, list)
    {
        if (cell_tmp->index == index)
        {
            if (const_id && cell_tmp->id && rt_strcmp(const_id, cell_tmp->id))
            {
                continue;
            }

            cell = cell_tmp;

            if (rt_ref_read(&cell->ref))
            {
                rt_ref_get(&cell->ref);
            }
            else
            {
                cell = RT_NULL;
            }
            break;
        }
    }

    rt_spin_unlock_irqrestore(&nvmem->spinlock, level);

    if (!rt_is_err_or_null(cell))
    {
        goto _put_node;
    }

    /* 3. Create a new one */
    cell = rt_calloc(1, sizeof(*cell));

    if (!cell)
    {
        cell = rt_err_ptr(-RT_ENOMEM);
        LOG_E("No memory to create cell: %s (%d)", const_id, index);

        goto _put_node;
    }

    cell->index = index;
    cell->id = const_id;
    *(rt_bool_t *)&cell->free_able = RT_TRUE;

    addr = rt_ofw_prop_read_raw(cell_np, "reg", &length);

    if (!addr || length < 2 * sizeof(rt_uint32_t))
    {
        LOG_E("%s Invalid reg", rt_ofw_node_full_name(cell_np));

        goto _fail;
    }

    cell->offset = fdt32_to_cpu(*addr++);
    cell->bytes = fdt32_to_cpu(*addr);

    addr = rt_ofw_prop_read_raw(cell_np, "bits", &length);

    if (addr && length == 2 * sizeof(rt_uint32_t))
    {
        cell->bit_offset = fdt32_to_cpu(*addr++);
        cell->nbits = fdt32_to_cpu(*addr);
    }

    /* The user's reference is '1' */
    rt_ref_init(&cell->ref);
    cell->np = cell_np;
    cell->nvmem = nvmem;

    rt_nvmem_device_append_cell(nvmem, cell);
    rt_ofw_node_get(cell_np);
    rt_ofw_data(cell_np) = cell;

    goto _put_node;

_fail:
    rt_free(cell);
    cell = rt_err_ptr(-RT_EINVAL);

_put_node:
    rt_ofw_node_put(cell_np);
    rt_ofw_node_put(nvmem_np);

    return cell;
}
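
/**
 * @brief Get an NVMEM cell by its index in the consumer's "nvmem-cells".
 *
 * @param dev the consumer device
 * @param index the cell index, starting from 0
 *
 * @return the cell on success, RT_NULL on invalid argument, or an error
 *         pointer (-RT_ENOSYS when the device has no OFW node)
 */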
struct rt_nvmem_cell *rt_nvmem_get_cell_by_index(struct rt_device *dev,
        int index)
{
    if (!dev || index < 0)
    {
        return RT_NULL;
    }

    if (dev->ofw_node)
    {
        return ofw_nvmem_get_cell(dev->ofw_node, index, RT_NULL);
    }

    return rt_err_ptr(-RT_ENOSYS);
}
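
/**
 * @brief Get an NVMEM cell by its name in the consumer's "nvmem-cell-names".
 *
 * @param dev the consumer device
 * @param id the cell name to look up
 *
 * @return the cell on success, RT_NULL on invalid argument, or an error
 *         pointer if the name is not found
 */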
struct rt_nvmem_cell *rt_nvmem_get_cell_by_name(struct rt_device *dev,
        const char *id)
{
    struct rt_ofw_node *np;

    if (!dev || !id)
    {
        return RT_NULL;
    }

    np = dev->ofw_node;

    if (np)
    {
        int index = 0;
        const char *const_id;
        struct rt_ofw_prop *prop;

        rt_ofw_foreach_prop_string(np, "nvmem-cell-names", prop, const_id)
        {
            if (!rt_strcmp(id, const_id))
            {
                return ofw_nvmem_get_cell(np, index, const_id);
            }

            ++index;
        }
    }

    return rt_err_ptr(-RT_ENOSYS);
}
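
/*
 * Release callback for the device reference count. A registered NVMEM
 * device is expected to outlive all of its cells and users, so reaching
 * a reference count of zero here is treated as a fatal error.
 */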
static void nvmem_release(struct rt_ref *r)
{
    struct rt_nvmem_device *ndev = rt_container_of(r, struct rt_nvmem_device, ref);

    if (ndev->parent.ofw_node)
    {
        LOG_E("%s device is released", rt_ofw_node_full_name(ndev->parent.ofw_node));
    }

    RT_ASSERT(0);
}
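
/*
 * Release callback for a cell's reference count: detach the cell from its
 * OFW node and the device's cell list, drop the device reference taken in
 * rt_nvmem_device_append_cell(), and free the cell.
 */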
static void cell_release(struct rt_ref *r)
{
    rt_ubase_t level;
    struct rt_nvmem_cell *cell = rt_container_of(r, struct rt_nvmem_cell, ref);
    struct rt_nvmem_device *nvmem = cell->nvmem;

    if (!cell->free_able)
    {
        /* A cell that is not freeable must never be released */
        LOG_E("%s cell is released", cell->id);
        RT_ASSERT(0);
    }

    if (cell->np)
    {
        rt_ofw_data(cell->np) = RT_NULL;
        rt_ofw_node_put(cell->np);
    }

    level = rt_spin_lock_irqsave(&nvmem->spinlock);
    rt_list_remove(&cell->list);
    --nvmem->cells_nr;
    rt_spin_unlock_irqrestore(&nvmem->spinlock, level);

    rt_ref_put(&nvmem->ref, &nvmem_release);

    rt_free(cell);
}
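
/**
 * @brief Put (release a reference to) an NVMEM cell obtained from
 *        rt_nvmem_get_cell_by_index() or rt_nvmem_get_cell_by_name().
 *
 * When the last reference is dropped, the cell is destroyed via
 * cell_release().
 *
 * @param cell the cell to put, RT_NULL is ignored
 */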
void rt_nvmem_put_cell(struct rt_nvmem_cell *cell)
{
    if (!cell)
    {
        return;
    }

    rt_ref_put(&cell->ref, &cell_release);
}