clk-rk-divider.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-03-08     GuEe-GUI     the first version
 */
#include "clk-rk-divider.h"
/* Scratch state carried through the best-divider search. */
struct clk_rate_request
{
    rt_ubase_t rate;                      /* requested target rate */
    rt_ubase_t best_parent_rate;          /* best parent rate found so far */
    struct rt_clk_cell *best_parent_cell; /* parent cell the search queries */
};
  17. #define clk_div_mask(width) ((1 << (width)) - 1)
  18. rt_inline rt_uint32_t clk_div_readl(struct rockchip_clk_cell *rk_cell)
  19. {
  20. void *base = rk_cell->provider->reg_base;
  21. base += rk_cell->div_offset ? : rk_cell->muxdiv_offset;
  22. if (rk_cell->div_flags & CLK_DIVIDER_BIG_ENDIAN)
  23. {
  24. return rt_be32_to_cpu(HWREG32(base));
  25. }
  26. return HWREG32(base);
  27. }
  28. rt_inline void clk_div_writel(struct rockchip_clk_cell *rk_cell, rt_uint32_t val)
  29. {
  30. void *base = rk_cell->provider->reg_base;
  31. base += rk_cell->div_offset ? : rk_cell->muxdiv_offset;
  32. if (rk_cell->div_flags & CLK_DIVIDER_BIG_ENDIAN)
  33. {
  34. HWREG32(base) = rt_cpu_to_be32(val);
  35. }
  36. else
  37. {
  38. HWREG32(base) = val;
  39. }
  40. }
  41. static rt_uint32_t _get_table_maxdiv(const struct clk_div_table *table, rt_uint8_t width)
  42. {
  43. rt_uint32_t maxdiv = 0, mask = clk_div_mask(width);
  44. const struct clk_div_table *clkt;
  45. for (clkt = table; clkt->div; ++clkt)
  46. {
  47. if (clkt->div > maxdiv && clkt->val <= mask)
  48. {
  49. maxdiv = clkt->div;
  50. }
  51. }
  52. return maxdiv;
  53. }
  54. static rt_uint32_t _get_table_mindiv(const struct clk_div_table *table)
  55. {
  56. rt_uint32_t mindiv = RT_UINT32_MAX;
  57. const struct clk_div_table *clkt;
  58. for (clkt = table; clkt->div; ++clkt)
  59. {
  60. if (clkt->div < mindiv)
  61. {
  62. mindiv = clkt->div;
  63. }
  64. }
  65. return mindiv;
  66. }
  67. static rt_uint32_t _get_maxdiv(const struct clk_div_table *table, rt_uint8_t width, rt_ubase_t flags)
  68. {
  69. if (flags & CLK_DIVIDER_ONE_BASED)
  70. {
  71. return clk_div_mask(width);
  72. }
  73. if (flags & CLK_DIVIDER_POWER_OF_TWO)
  74. {
  75. return 1 << clk_div_mask(width);
  76. }
  77. if (table)
  78. {
  79. return _get_table_maxdiv(table, width);
  80. }
  81. return clk_div_mask(width) + 1;
  82. }
  83. static rt_uint32_t _get_table_div(const struct clk_div_table *table, rt_uint32_t val)
  84. {
  85. const struct clk_div_table *clkt;
  86. for (clkt = table; clkt->div; ++clkt)
  87. {
  88. if (clkt->val == val)
  89. {
  90. return clkt->div;
  91. }
  92. }
  93. return 0;
  94. }
  95. static rt_uint32_t _get_div(const struct clk_div_table *table,
  96. rt_uint32_t val, rt_ubase_t flags, rt_uint8_t width)
  97. {
  98. if (flags & CLK_DIVIDER_ONE_BASED)
  99. {
  100. return val;
  101. }
  102. if (flags & CLK_DIVIDER_POWER_OF_TWO)
  103. {
  104. return 1 << val;
  105. }
  106. if (flags & CLK_DIVIDER_MAX_AT_ZERO)
  107. {
  108. return val ? val : clk_div_mask(width) + 1;
  109. }
  110. if (table)
  111. {
  112. return _get_table_div(table, val);
  113. }
  114. return val + 1;
  115. }
  116. static rt_uint32_t _get_table_val(const struct clk_div_table *table, rt_uint32_t div)
  117. {
  118. const struct clk_div_table *clkt;
  119. for (clkt = table; clkt->div; ++clkt)
  120. {
  121. if (clkt->div == div)
  122. {
  123. return clkt->val;
  124. }
  125. }
  126. return 0;
  127. }
  128. static rt_uint32_t _get_val(const struct clk_div_table *table,
  129. rt_uint32_t div, rt_ubase_t flags, rt_uint8_t width)
  130. {
  131. if (flags & CLK_DIVIDER_ONE_BASED)
  132. {
  133. return div;
  134. }
  135. if (flags & CLK_DIVIDER_POWER_OF_TWO)
  136. {
  137. return __rt_ffs(div) - 1;
  138. }
  139. if (flags & CLK_DIVIDER_MAX_AT_ZERO)
  140. {
  141. return (div == clk_div_mask(width) + 1) ? 0 : div;
  142. }
  143. if (table)
  144. {
  145. return _get_table_val(table, div);
  146. }
  147. return div - 1;
  148. }
  149. static int _round_up_table(const struct clk_div_table *table, int div)
  150. {
  151. int up = RT_UINT32_MAX >> 1;
  152. const struct clk_div_table *clkt;
  153. for (clkt = table; clkt->div; ++clkt)
  154. {
  155. if (clkt->div == div)
  156. {
  157. return clkt->div;
  158. }
  159. else if (clkt->div < div)
  160. {
  161. continue;
  162. }
  163. if (clkt->div - div < up - div)
  164. {
  165. up = clkt->div;
  166. }
  167. }
  168. return up;
  169. }
  170. static int _round_down_table(const struct clk_div_table *table, int div)
  171. {
  172. int down = _get_table_mindiv(table);
  173. const struct clk_div_table *clkt;
  174. for (clkt = table; clkt->div; ++clkt)
  175. {
  176. if (clkt->div == div)
  177. {
  178. return clkt->div;
  179. }
  180. else if (clkt->div > div)
  181. {
  182. continue;
  183. }
  184. if (div - clkt->div < div - down)
  185. {
  186. down = clkt->div;
  187. }
  188. }
  189. return down;
  190. }
  191. static int _div_round_up(const struct clk_div_table *table,
  192. rt_ubase_t parent_rate, rt_ubase_t rate, rt_ubase_t flags)
  193. {
  194. int div = RT_DIV_ROUND_UP_ULL((rt_uint64_t)parent_rate, rate);
  195. if (flags & CLK_DIVIDER_POWER_OF_TWO)
  196. {
  197. div = __roundup_pow_of_two(div);
  198. }
  199. if (table)
  200. {
  201. div = _round_up_table(table, div);
  202. }
  203. return div;
  204. }
  205. static int _div_round_closest(const struct clk_div_table *table,
  206. rt_ubase_t parent_rate, rt_ubase_t rate, rt_ubase_t flags)
  207. {
  208. int up, down;
  209. rt_ubase_t up_rate, down_rate;
  210. up = RT_DIV_ROUND_UP_ULL((rt_uint64_t)parent_rate, rate);
  211. down = parent_rate / rate;
  212. if (flags & CLK_DIVIDER_POWER_OF_TWO)
  213. {
  214. up = __roundup_pow_of_two(up);
  215. down = __rounddown_pow_of_two(down);
  216. }
  217. else if (table)
  218. {
  219. up = _round_up_table(table, up);
  220. down = _round_down_table(table, down);
  221. }
  222. up_rate = RT_DIV_ROUND_UP_ULL((rt_uint64_t)parent_rate, up);
  223. down_rate = RT_DIV_ROUND_UP_ULL((rt_uint64_t)parent_rate, down);
  224. return (rate - up_rate) <= (down_rate - rate) ? up : down;
  225. }
  226. static int _div_round(const struct clk_div_table *table,
  227. rt_ubase_t parent_rate, rt_ubase_t rate, rt_ubase_t flags)
  228. {
  229. if (flags & CLK_DIVIDER_ROUND_CLOSEST)
  230. {
  231. return _div_round_closest(table, parent_rate, rate, flags);
  232. }
  233. return _div_round_up(table, parent_rate, rate, flags);
  234. }
  235. static rt_bool_t _is_best_div(rt_ubase_t rate, rt_ubase_t now, rt_ubase_t best, rt_ubase_t flags)
  236. {
  237. if (flags & CLK_DIVIDER_ROUND_CLOSEST)
  238. {
  239. return rt_abs(rate - now) < rt_abs(rate - best);
  240. }
  241. return now <= rate && now > best;
  242. }
  243. static int _next_div(const struct clk_div_table *table, int div, rt_ubase_t flags)
  244. {
  245. ++div;
  246. if (flags & CLK_DIVIDER_POWER_OF_TWO)
  247. {
  248. return __roundup_pow_of_two(div);
  249. }
  250. if (table)
  251. {
  252. return _round_up_table(table, div);
  253. }
  254. return div;
  255. }
/*
 * Find the divider that brings the output closest to `rate`, optionally
 * re-rating the parent when the cell allows it.
 *
 * On return *best_parent_rate holds the parent rate that the chosen divider
 * should be applied to. Returns the chosen divider (>= 1).
 */
static int clk_divider_bestdiv(struct rt_clk_cell *cell, struct rt_clk_cell *parent,
        rt_ubase_t rate, rt_ubase_t *best_parent_rate,
        const struct clk_div_table *table, rt_uint8_t width, rt_ubase_t flags)
{
    int bestdiv = 0;
    rt_ubase_t parent_rate, best = 0, now, maxdiv, parent_rate_saved = *best_parent_rate;

    /* Guard the divisions below against a zero target rate. */
    if (!rate)
    {
        rate = 1;
    }

    maxdiv = _get_maxdiv(table, width, flags);

    if (!(cell->flags & RT_CLK_F_SET_RATE_PARENT))
    {
        /* Parent rate is fixed: just round and clamp to [1, maxdiv]. */
        parent_rate = *best_parent_rate;
        bestdiv = _div_round(table, parent_rate, rate, flags);
        bestdiv = bestdiv == 0 ? 1 : bestdiv;
        bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
        return bestdiv;
    }

    /*
     * The maximum divider we can use without overflowing
     * rt_ubase_t in rate * i below
     */
    maxdiv = rt_min((~0UL) / rate, maxdiv);

    /* Try every legal divider, asking the parent what rate it could supply. */
    for (int i = _next_div(table, 0, flags); i <= maxdiv; i = _next_div(table, i, flags))
    {
        if (rate * i == parent_rate_saved)
        {
            /*
             * It's the most ideal case if the requested rate can be
             * divided from parent clock without needing to change
             * parent rate, so return the divider immediately.
             */
            *best_parent_rate = parent_rate_saved;
            return i;
        }
        parent_rate = rt_clk_cell_round_rate(parent, rate * i);
        now = RT_DIV_ROUND_UP_ULL((rt_uint64_t)parent_rate, i);
        if (_is_best_div(rate, now, best, flags))
        {
            bestdiv = i;
            best = now;
            *best_parent_rate = parent_rate;
        }
    }

    /* Nothing matched: fall back to the largest divider and slowest parent. */
    if (!bestdiv)
    {
        bestdiv = _get_maxdiv(table, width, flags);
        *best_parent_rate = rt_clk_cell_round_rate(parent, 1);
    }

    return bestdiv;
}
  308. rt_inline rt_bool_t is_power_of_2(rt_ubase_t n)
  309. {
  310. return (n != 0 && ((n & (n - 1)) == 0));
  311. }
  312. static rt_bool_t _is_valid_table_div(const struct clk_div_table *table, rt_uint32_t div)
  313. {
  314. const struct clk_div_table *clkt;
  315. for (clkt = table; clkt->div; ++clkt)
  316. {
  317. if (clkt->div == div)
  318. {
  319. return RT_TRUE;
  320. }
  321. }
  322. return RT_FALSE;
  323. }
  324. static rt_bool_t _is_valid_div(const struct clk_div_table *table, rt_uint32_t div, rt_ubase_t flags)
  325. {
  326. if (flags & CLK_DIVIDER_POWER_OF_TWO)
  327. {
  328. return is_power_of_2(div);
  329. }
  330. if (table)
  331. {
  332. return _is_valid_table_div(table, div);
  333. }
  334. return RT_TRUE;
  335. }
  336. static int divider_get_val(rt_ubase_t rate, rt_ubase_t parent_rate,
  337. const struct clk_div_table *table, rt_uint8_t width, rt_ubase_t flags)
  338. {
  339. rt_uint32_t div, value;
  340. div = RT_DIV_ROUND_UP_ULL((rt_uint64_t)parent_rate, rate);
  341. if (!_is_valid_div(table, div, flags))
  342. {
  343. return -RT_EINVAL;
  344. }
  345. value = _get_val(table, div, flags, width);
  346. return rt_min_t(rt_uint32_t, value, clk_div_mask(width));
  347. }
  348. static int divider_determine_rate(struct rt_clk_cell *cell, struct clk_rate_request *req,
  349. const struct clk_div_table *table, rt_uint8_t width, rt_ubase_t flags)
  350. {
  351. int div;
  352. div = clk_divider_bestdiv(cell, req->best_parent_cell, req->rate,
  353. &req->best_parent_rate, table, width, flags);
  354. req->rate = RT_DIV_ROUND_UP_ULL((rt_uint64_t)req->best_parent_rate, div);
  355. return 0;
  356. }
  357. static int divider_ro_determine_rate(struct rt_clk_cell *cell, struct clk_rate_request *req,
  358. const struct clk_div_table *table, rt_uint8_t width, rt_ubase_t flags, rt_uint32_t val)
  359. {
  360. int div;
  361. div = _get_div(table, val, flags, width);
  362. /* Even a read-only clock can propagate a rate change */
  363. if (cell->flags & RT_CLK_F_SET_RATE_PARENT)
  364. {
  365. if (!req->best_parent_cell)
  366. {
  367. return -RT_EINVAL;
  368. }
  369. req->best_parent_rate = rt_clk_cell_round_rate(req->best_parent_cell, req->rate * div);
  370. }
  371. req->rate = RT_DIV_ROUND_UP_ULL((rt_uint64_t)req->best_parent_rate, div);
  372. return 0;
  373. }
  374. static long divider_round_rate_parent(struct rt_clk_cell *cell, struct rt_clk_cell *parent,
  375. rt_ubase_t rate, rt_ubase_t *prate,
  376. const struct clk_div_table *table, rt_uint8_t width, rt_ubase_t flags)
  377. {
  378. int ret;
  379. struct clk_rate_request req;
  380. req.rate = rate;
  381. req.best_parent_rate = *prate;
  382. req.best_parent_cell = parent;
  383. if ((ret = divider_determine_rate(cell, &req, table, width, flags)))
  384. {
  385. return ret;
  386. }
  387. *prate = req.best_parent_rate;
  388. return req.rate;
  389. }
  390. static long divider_ro_round_rate_parent(struct rt_clk_cell *cell, struct rt_clk_cell *parent,
  391. rt_ubase_t rate, rt_ubase_t *prate,
  392. const struct clk_div_table *table, rt_uint8_t width, rt_ubase_t flags, rt_uint32_t val)
  393. {
  394. int ret;
  395. struct clk_rate_request req;
  396. req.rate = rate;
  397. req.best_parent_rate = *prate;
  398. req.best_parent_cell = parent;
  399. if ((ret = divider_ro_determine_rate(cell, &req, table, width, flags, val)))
  400. {
  401. return ret;
  402. }
  403. *prate = req.best_parent_rate;
  404. return req.rate;
  405. }
  406. static rt_ubase_t divider_recalc_rate(rt_ubase_t parent_rate, rt_uint32_t val,
  407. const struct clk_div_table *table, rt_ubase_t flags, rt_ubase_t width)
  408. {
  409. rt_uint32_t div = _get_div(table, val, flags, width);
  410. if (!div)
  411. {
  412. return parent_rate;
  413. }
  414. return RT_DIV_ROUND_UP_ULL((rt_uint64_t)parent_rate, div);
  415. }
  416. static rt_ubase_t clk_divider_recalc_rate(struct rt_clk_cell *cell, rt_ubase_t parent_rate)
  417. {
  418. rt_uint32_t val;
  419. struct rockchip_clk_cell *rk_cell = cell_to_rockchip_clk_cell(cell);
  420. val = clk_div_readl(rk_cell) >> rk_cell->div_shift;
  421. val &= clk_div_mask(rk_cell->div_width);
  422. return divider_recalc_rate(parent_rate, val,
  423. rk_cell->div_table, rk_cell->div_flags, rk_cell->div_width);
  424. }
  425. static rt_base_t clk_divider_round_rate(struct rt_clk_cell *cell,
  426. rt_ubase_t rate, rt_ubase_t *prate)
  427. {
  428. struct rockchip_clk_cell *rk_cell = cell_to_rockchip_clk_cell(cell);
  429. if (rk_cell->div_flags & CLK_DIVIDER_READ_ONLY)
  430. {
  431. rt_uint32_t val;
  432. val = clk_div_readl(rk_cell) >> rk_cell->div_shift;
  433. val &= clk_div_mask(rk_cell->div_width);
  434. return divider_ro_round_rate_parent(cell, rt_clk_cell_get_parent(cell),
  435. rate, prate, rk_cell->div_table,
  436. rk_cell->div_width, rk_cell->div_flags, val);
  437. }
  438. return divider_round_rate_parent(cell, rt_clk_cell_get_parent(cell),
  439. rate, prate, rk_cell->div_table,
  440. rk_cell->div_width, rk_cell->div_flags);
  441. }
  442. static rt_err_t clk_divider_set_rate(struct rt_clk_cell *cell,
  443. rt_ubase_t rate, rt_ubase_t parent_rate)
  444. {
  445. int value;
  446. rt_uint32_t val;
  447. struct rockchip_clk_cell *rk_cell = cell_to_rockchip_clk_cell(cell);
  448. value = divider_get_val(rate, parent_rate,
  449. rk_cell->div_table, rk_cell->div_width, rk_cell->div_flags);
  450. if (value < 0)
  451. {
  452. return value;
  453. }
  454. if (rk_cell->div_flags & CLK_DIVIDER_HIWORD_MASK)
  455. {
  456. val = clk_div_mask(rk_cell->div_width) << (rk_cell->div_shift + 16);
  457. }
  458. else
  459. {
  460. val = clk_div_readl(rk_cell);
  461. val &= ~(clk_div_mask(rk_cell->div_width) << rk_cell->div_shift);
  462. }
  463. val |= (rt_uint32_t)value << rk_cell->div_shift;
  464. clk_div_writel(rk_cell, val);
  465. return 0;
  466. }
/* Operations for a normal (writable) divider. */
const struct rt_clk_ops clk_divider_ops =
{
    .recalc_rate = clk_divider_recalc_rate,
    .round_rate = clk_divider_round_rate,
    .set_rate = clk_divider_set_rate,
};
/* Operations for a read-only divider: deliberately no .set_rate. */
const struct rt_clk_ops clk_divider_ro_ops =
{
    .recalc_rate = clk_divider_recalc_rate,
    .round_rate = clk_divider_round_rate,
};
  478. void rockchip_divider_clk_cell_init(struct rockchip_clk_cell *rk_cell)
  479. {
  480. if (rk_cell->div_flags & CLK_DIVIDER_READ_ONLY)
  481. {
  482. rk_cell->cell.ops = &clk_divider_ro_ops;
  483. }
  484. else
  485. {
  486. rk_cell->cell.ops = &clk_divider_ops;
  487. }
  488. }