clk-rk-fraction-divider.c

/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-3-08      GuEe-GUI     the first version
 */

#include "clk-rk-fraction-divider.h"
#include "clk-rk-mux.h"
#include "clk-rk-gate.h"

struct u32_fract
{
    rt_uint32_t numerator;
    rt_uint32_t denominator;
};

#define CLK_FD_MSHIFT   16
#define CLK_FD_MWIDTH   16
#define CLK_FD_NSHIFT   0
#define CLK_FD_NWIDTH   16
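
/*
 * Register layout implied by the shift/width macros above: the numerator (M)
 * occupies bits [31:16] and the denominator (N) bits [15:0] of the register at
 * muxdiv_offset. The resulting output clock, as computed in
 * clk_fd_recalc_rate() below, is rate_out = parent_rate * M / N, with both
 * fields bumped by one when CLK_FRAC_DIVIDER_ZERO_BASED is set.
 */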

/* Read the fractional divider register, honouring the big-endian flag. */
rt_inline rt_uint32_t clk_fd_readl(struct rockchip_clk_cell *rk_cell)
{
    void *base = rk_cell->provider->reg_base;

    if (rk_cell->div_flags & CLK_FRAC_DIVIDER_BIG_ENDIAN)
    {
        return rt_be32_to_cpu(HWREG32(base + rk_cell->muxdiv_offset));
    }

    return HWREG32(base + rk_cell->muxdiv_offset);
}

/* Write the fractional divider register, honouring the big-endian flag. */
rt_inline void clk_fd_writel(struct rockchip_clk_cell *rk_cell, rt_uint32_t val)
{
    void *base = rk_cell->provider->reg_base;

    if (rk_cell->div_flags & CLK_FRAC_DIVIDER_BIG_ENDIAN)
    {
        HWREG32(base + rk_cell->muxdiv_offset) = rt_cpu_to_be32(val);
    }
    else
    {
        HWREG32(base + rk_cell->muxdiv_offset) = val;
    }
}

/* Decode the current numerator/denominator pair from the divider register. */
static void clk_fd_get_div(struct rt_clk_cell *cell, struct u32_fract *fract)
{
    rt_ubase_t m, n;
    rt_uint32_t val, mmask, nmask;
    struct rockchip_clk_cell *rk_cell = cell_to_rockchip_clk_cell(cell);

    val = clk_fd_readl(rk_cell);

    mmask = RT_GENMASK(CLK_FD_MWIDTH - 1, 0) << CLK_FD_MSHIFT;
    nmask = RT_GENMASK(CLK_FD_NWIDTH - 1, 0) << CLK_FD_NSHIFT;

    m = (val & mmask) >> CLK_FD_MSHIFT;
    n = (val & nmask) >> CLK_FD_NSHIFT;

    if (rk_cell->div_flags & CLK_FRAC_DIVIDER_ZERO_BASED)
    {
        ++m;
        ++n;
    }

    fract->numerator = m;
    fract->denominator = n;
}

/* rate_out = parent_rate * numerator / denominator (identity when unset). */
static rt_ubase_t clk_fd_recalc_rate(struct rt_clk_cell *cell, rt_ubase_t parent_rate)
{
    rt_uint64_t ret;
    struct u32_fract fract;

    clk_fd_get_div(cell, &fract);

    if (!fract.numerator || !fract.denominator)
    {
        return parent_rate;
    }

    ret = (rt_uint64_t)parent_rate * fract.numerator;
    rt_do_div(ret, fract.denominator);

    return ret;
}

static void clk_fractional_divider_general_approximation(struct rt_clk_cell *cell,
        rt_ubase_t rate, rt_ubase_t *parent_rate, rt_ubase_t *m, rt_ubase_t *n)
{
    struct rockchip_clk_cell *rk_cell = cell_to_rockchip_clk_cell(cell);

    /*
     * Bring the rate closer to *parent_rate to guarantee that m and n do not
     * overflow. The result is the nearest rate, left-shifted by
     * (scale - CLK_FD_NWIDTH) bits.
     *
     * For the detailed explanation see the top comment in this file.
     */
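    /*
     * Illustrative example (values assumed, not taken from a real board):
     * with *parent_rate = 1.2 GHz and rate = 50 Hz, *parent_rate / rate - 1
     * is roughly 24 million, so fls_long() gives scale = 25. Since 25 is
     * greater than CLK_FD_NWIDTH (16), rate is shifted left by 9 bits before
     * the best-approximation step, keeping n within its 16-bit field.
     */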
    if (rk_cell->div_flags & CLK_FRAC_DIVIDER_POWER_OF_TWO_PS)
    {
        rt_ubase_t scale = fls_long(*parent_rate / rate - 1);

        if (scale > CLK_FD_NWIDTH)
        {
            rate <<= scale - CLK_FD_NWIDTH;
        }
    }

    rational_best_approximation(rate, *parent_rate,
            RT_GENMASK(CLK_FD_MWIDTH - 1, 0), RT_GENMASK(CLK_FD_NWIDTH - 1, 0), m, n);
}

/*
 * The fractional divider must keep the denominator at least 20 times larger
 * than the numerator to generate a precise clock frequency.
 */
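/*
 * Illustrative example (rates assumed): for a requested rate of 48 MHz with a
 * parent running at 594 MHz, rate * 20 = 960 MHz exceeds the parent rate and
 * 594 is not a multiple of 48, so the code below falls back to the grandparent
 * rate (when there is one) to regain headroom. If the chosen parent rate
 * happens to be an exact multiple of the requested rate, the cell degenerates
 * into an integer divider (m = 1, n = parent / rate) and the 20x margin is not
 * required.
 */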
static void rockchip_fractional_approximation(struct rt_clk_cell *cell,
        rt_ubase_t rate, rt_ubase_t *parent_rate, rt_ubase_t *m, rt_ubase_t *n)
{
    struct rt_clk_cell *p_parent;
    rt_ubase_t p_rate, p_parent_rate;
    struct rockchip_clk_cell *rk_cell = cell_to_rockchip_clk_cell(cell);

    if (rate == 0)
    {
        *m = 0;
        *n = 1;

        return;
    }

    p_rate = rt_clk_cell_get_rate(rt_clk_cell_get_parent(cell));

    if (rate * 20 > p_rate && p_rate % rate != 0)
    {
        p_parent = rt_clk_cell_get_parent(rt_clk_cell_get_parent(cell));

        if (!p_parent)
        {
            *parent_rate = p_rate;
        }
        else
        {
            p_parent_rate = rt_clk_cell_get_rate(p_parent);
            *parent_rate = p_parent_rate;
        }

        if (*parent_rate == 0)
        {
            *m = 0;
            *n = 1;

            return;
        }

        if (*parent_rate < rate * 20)
        {
            /*
             * When the fractional divider is only used as an integer divider,
             * the 20x margin is not needed.
             */
            if (!(*parent_rate % rate))
            {
                *m = 1;
                *n = *parent_rate / rate;

                return;
            }
            else if (!(rk_cell->div_flags & CLK_FRAC_DIVIDER_NO_LIMIT))
            {
                *m = 0;
                *n = 1;

                return;
            }
        }
    }

    rk_cell->div_flags |= CLK_FRAC_DIVIDER_POWER_OF_TWO_PS;

    clk_fractional_divider_general_approximation(cell, rate, parent_rate, m, n);
}

static rt_base_t clk_fd_round_rate(struct rt_clk_cell *cell,
        rt_ubase_t rate, rt_ubase_t *parent_rate)
{
    rt_ubase_t m, n;
    rt_uint64_t ret;

    if (!rate || rate >= *parent_rate)
    {
        return *parent_rate;
    }

    rockchip_fractional_approximation(cell, rate, parent_rate, &m, &n);

    ret = (rt_uint64_t)*parent_rate * m;
    rt_do_div(ret, n);

    return ret;
}

static rt_err_t clk_fd_set_rate(struct rt_clk_cell *cell, rt_ubase_t rate, rt_ubase_t parent_rate)
{
    rt_ubase_t m, n;
    rt_uint32_t mmask, nmask, val;
    struct rockchip_clk_cell *rk_cell = cell_to_rockchip_clk_cell(cell);

    rational_best_approximation(rate, parent_rate,
            RT_GENMASK(CLK_FD_MWIDTH - 1, 0), RT_GENMASK(CLK_FD_NWIDTH - 1, 0), &m, &n);

    if (rk_cell->div_flags & CLK_FRAC_DIVIDER_ZERO_BASED)
    {
        --m;
        --n;
    }

    mmask = RT_GENMASK(CLK_FD_MWIDTH - 1, 0) << CLK_FD_MSHIFT;
    nmask = RT_GENMASK(CLK_FD_NWIDTH - 1, 0) << CLK_FD_NSHIFT;

    /*
     * When the fractional divider is compensated, the [1:0] bits of the
     * numerator register are omitted, which leads to a large deviation in the
     * result. Therefore the numerator must be at least 4.
     *
     * Note the exception: if the fractional divide is even, keep the original
     * numerator (< 4) and denominator, otherwise the duty ratio may no longer
     * be 50%.
     */
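    /*
     * Illustrative example (values assumed): rational_best_approximation()
     * may return m = 1, n = 3. n is odd, so val = RT_DIV_ROUND_UP(4, 1) = 4
     * and the pair is scaled to m = 4, n = 12, keeping the same ratio while
     * lifting the numerator to at least 4. For m = 1, n = 2 (even n) the pair
     * is kept unchanged (val = 1) so the 50% duty ratio is preserved.
     */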
    if (m < 4 && m != 0)
    {
        if (n % 2 == 0)
        {
            val = 1;
        }
        else
        {
            val = RT_DIV_ROUND_UP(4, m);
        }

        n *= val;
        m *= val;

        if (n > nmask)
        {
            n = nmask;
        }
    }

    val = clk_fd_readl(rk_cell);
    val &= ~(mmask | nmask);
    val |= (m << CLK_FD_MSHIFT) | (n << CLK_FD_NSHIFT);
    clk_fd_writel(rk_cell, val);

    return RT_EOK;
}

/*
 * Re-mux the child clock to the fractional divider parent around a rate
 * change, then restore the original parent afterwards.
 */
static rt_err_t rockchip_clk_frac_notify(struct rt_clk_notifier *notifier,
        rt_ubase_t msg, rt_ubase_t old_rate, rt_ubase_t new_rate)
{
    struct rt_clk_cell *cell;
    struct rockchip_fraction_divider_clk_cell *fraction_divider_cell;

    fraction_divider_cell = rt_container_of(notifier, struct rockchip_fraction_divider_clk_cell, notifier);
    cell = &fraction_divider_cell->rk_cell_child->cell;

    if (msg == RT_CLK_MSG_PRE_RATE_CHANGE)
    {
        fraction_divider_cell->rate_change_idx = cell->ops->get_parent(cell);

        if (fraction_divider_cell->rate_change_idx != fraction_divider_cell->mux_frac_idx)
        {
            cell->ops->set_parent(cell, fraction_divider_cell->mux_frac_idx);
            fraction_divider_cell->rate_change_remuxed = 1;
        }
    }
    else if (msg == RT_CLK_MSG_POST_RATE_CHANGE)
    {
        /*
         * The RT_CLK_MSG_POST_RATE_CHANGE notifier runs directly after the
         * divider clock is set in clk_change_rate, so we will have remuxed
         * back to the original parent before clk_change_rate reaches the
         * mux itself.
         */
        if (fraction_divider_cell->rate_change_remuxed)
        {
            cell->ops->set_parent(cell, fraction_divider_cell->rate_change_idx);
            fraction_divider_cell->rate_change_remuxed = 0;
        }
    }

    return RT_EOK;
}

/*
 * Find the index of string in an array of up to n entries, stopping at the
 * first NULL entry; returns -RT_EINVAL when the string is not found.
 */
static int match_string(const char * const *array, size_t n, const char *string)
{
    for (int index = 0; index < n; ++index)
    {
        const char *item = array[index];

        if (!item)
        {
            break;
        }

        if (!rt_strcmp(item, string))
        {
            return index;
        }
    }

    return -RT_EINVAL;
}

void rockchip_fraction_divider_clk_cell_init(struct rockchip_clk_cell *rk_cell)
{
    struct rockchip_fraction_divider_clk_cell *fraction_divider_cell =
            cell_to_rockchip_fraction_divider_clk_cell(&rk_cell->cell);

    rk_cell->cell.ops = &fraction_divider_cell->ops;
    rk_cell->cell.flags |= RT_CLK_F_SET_RATE_UNGATE;

    if (fraction_divider_cell->rk_cell_child)
    {
        struct rockchip_clk_cell *rk_cell_child = fraction_divider_cell->rk_cell_child;
        struct rt_clk_cell *cell = &rk_cell_child->cell;

        rk_cell_child->cell.flags |= RT_CLK_F_SET_RATE_PARENT;

        fraction_divider_cell->mux_frac_idx = match_string(cell->parent_names, cell->parents_nr,
                rk_cell->cell.name);

        rockchip_mux_clk_cell_init(rk_cell);

        fraction_divider_cell->ops.get_parent = rockchip_mux_clk_ops.get_parent;
        fraction_divider_cell->ops.set_parent = rockchip_mux_clk_ops.set_parent;
    }

    if (rk_cell->gate_offset >= 0)
    {
        fraction_divider_cell->ops.enable = rockchip_gate_clk_ops.enable;
        fraction_divider_cell->ops.disable = rockchip_gate_clk_ops.disable;
        fraction_divider_cell->ops.is_enabled = rockchip_gate_clk_ops.is_enabled;
    }

    fraction_divider_cell->ops.recalc_rate = clk_fd_recalc_rate;
    fraction_divider_cell->ops.round_rate = clk_fd_round_rate;
    fraction_divider_cell->ops.set_rate = clk_fd_set_rate;
}

void rockchip_fraction_divider_clk_cell_setup(struct rockchip_clk_cell *rk_cell)
{
    struct rockchip_fraction_divider_clk_cell *fraction_divider_cell =
            cell_to_rockchip_fraction_divider_clk_cell(&rk_cell->cell);
    struct rockchip_clk_cell *rk_cell_child = fraction_divider_cell->rk_cell_child;

    if (fraction_divider_cell->mux_frac_idx >= 0)
    {
        fraction_divider_cell->notifier.callback = rockchip_clk_frac_notify;

        rt_clk_notifier_register(rt_clk_cell_get_clk(&rk_cell_child->cell, RT_NULL),
                &fraction_divider_cell->notifier);
    }
}