  1. /*
  2. * Copyright (c) 2006-2022, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-3-08 GuEe-GUI the first version
  9. */
  10. #include "clk-rk-half-divider.h"
  11. #include "clk-rk-gate.h"
  12. #include "clk-rk-mux.h"
  13. #define clk_div_mask(width) ((1 << (width)) - 1)
  14. static rt_bool_t _is_best_half_div(rt_ubase_t rate, rt_ubase_t now,
  15. rt_ubase_t best, rt_ubase_t flags)
  16. {
  17. if (flags & CLK_DIVIDER_ROUND_CLOSEST)
  18. {
  19. return rt_abs(rate - now) <= rt_abs(rate - best);
  20. }
  21. return now <= rate && now >= best;
  22. }
  23. rt_inline rt_uint32_t clk_div_readl(struct rockchip_clk_cell *rk_cell)
  24. {
  25. return HWREG32(rk_cell->provider->reg_base + (rk_cell->div_offset ? : rk_cell->muxdiv_offset));
  26. }
  27. rt_inline void clk_div_writel(struct rockchip_clk_cell *rk_cell, rt_uint32_t val)
  28. {
  29. HWREG32(rk_cell->provider->reg_base + (rk_cell->div_offset ? : rk_cell->muxdiv_offset)) = val;
  30. }
  31. static rt_ubase_t clk_div_recalc_rate(struct rt_clk_cell *cell, rt_ubase_t parent_rate)
  32. {
  33. rt_uint32_t val;
  34. struct rockchip_clk_cell *rk_cell = cell_to_rockchip_clk_cell(cell);
  35. val = clk_div_readl(rk_cell) >> rk_cell->div_shift;
  36. val &= clk_div_mask(rk_cell->div_width);
  37. val = val * 2 + 3;
  38. return RT_DIV_ROUND_UP_ULL(((rt_uint64_t)parent_rate * 2), val);
  39. }
/**
 * Find the half-divider register field value that best matches @rate.
 *
 * The field value i encodes a divider of (i + 1.5), i.e.
 * rate = 2 * parent_rate / (2 * i + 3).
 *
 * @param cell the clock cell being configured
 * @param rate the target rate in Hz (a zero rate is treated as 1)
 * @param best_parent_rate in: the current parent rate; out: the parent rate
 *        that, combined with the returned field value, best matches @rate
 *        (only updated on the RT_CLK_F_SET_RATE_PARENT path)
 * @param width bit width of the divider field
 * @param flags divider flags (e.g. CLK_DIVIDER_ROUND_CLOSEST)
 *
 * @return the chosen divider field value, clamped to the field width
 */
static int clk_div_bestdiv(struct rt_clk_cell *cell, rt_ubase_t rate,
        rt_ubase_t *best_parent_rate, rt_uint8_t width, rt_ubase_t flags)
{
    rt_uint32_t bestdiv = 0;
    rt_bool_t is_bestdiv = RT_FALSE;
    rt_ubase_t parent_rate, best = 0, now, maxdiv;

    /* Guard the divisions below against a zero target rate. */
    if (!rate)
    {
        rate = 1;
    }

    maxdiv = clk_div_mask(width);

    if (!(cell->flags & RT_CLK_F_SET_RATE_PARENT))
    {
        /*
         * The parent rate is fixed: solve 2*parent/(2*i+3) >= rate for the
         * field value i directly instead of scanning every candidate.
         */
        parent_rate = *best_parent_rate;
        bestdiv = RT_DIV_ROUND_UP_ULL(((rt_uint64_t)parent_rate * 2), rate);

        if (bestdiv < 3)
        {
            /* The divider cannot go below 1.5, which is field value 0. */
            bestdiv = 0;
        }
        else
        {
            /* Convert the doubled divider back to the field encoding. */
            bestdiv = RT_DIV_ROUND_UP(bestdiv - 3, 2);
        }

        bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;

        return bestdiv;
    }

    /*
     * The maximum divider we can use without overflowing
     * rt_ubase_t in rate * i below
     */
    maxdiv = rt_min((~0UL) / rate, maxdiv);

    /*
     * The parent may change rate: try every field value, asking the parent
     * to round to the rate this divider would need, and keep the best pair.
     */
    for (int i = 0; i <= maxdiv; ++i)
    {
        /* Parent rate needed for an exact match: rate * (i + 1.5). */
        parent_rate = rt_clk_cell_round_rate(rt_clk_cell_get_parent(cell),
                ((rt_uint64_t)rate * (i * 2 + 3)) / 2);
        /* Output rate actually achieved with that parent rate. */
        now = RT_DIV_ROUND_UP_ULL(((rt_uint64_t)parent_rate * 2), (i * 2 + 3));

        if (_is_best_half_div(rate, now, best, flags))
        {
            is_bestdiv = RT_TRUE;
            bestdiv = i;
            best = now;
            *best_parent_rate = parent_rate;
        }
    }

    if (!is_bestdiv)
    {
        /* No candidate was accepted: fall back to the largest divider. */
        bestdiv = clk_div_mask(width);
        *best_parent_rate = rt_clk_cell_round_rate(rt_clk_cell_get_parent(cell), 1);
    }

    return bestdiv;
}
  91. static rt_base_t clk_div_round_rate(struct rt_clk_cell *cell,
  92. rt_ubase_t rate, rt_ubase_t *prate)
  93. {
  94. int div;
  95. struct rockchip_clk_cell *rk_cell = cell_to_rockchip_clk_cell(cell);
  96. div = clk_div_bestdiv(cell, rate, prate, rk_cell->div_width, rk_cell->div_flags);
  97. return RT_DIV_ROUND_UP_ULL(((rt_uint64_t)*prate * 2), div * 2 + 3);
  98. }
  99. static rt_err_t clk_div_set_rate(struct rt_clk_cell *cell,
  100. rt_ubase_t rate, rt_ubase_t parent_rate)
  101. {
  102. rt_uint32_t value, val;
  103. struct rockchip_clk_cell *rk_cell = cell_to_rockchip_clk_cell(cell);
  104. value = RT_DIV_ROUND_UP_ULL(((rt_uint64_t)parent_rate * 2), rate);
  105. value = RT_DIV_ROUND_UP(value - 3, 2);
  106. value = rt_min_t(rt_uint32_t, value, clk_div_mask(rk_cell->div_width));
  107. if (rk_cell->div_flags & CLK_DIVIDER_HIWORD_MASK)
  108. {
  109. val = clk_div_mask(rk_cell->div_width) << (rk_cell->div_shift + 16);
  110. }
  111. else
  112. {
  113. val = clk_div_readl(rk_cell);
  114. val &= ~(clk_div_mask(rk_cell->div_width) << rk_cell->div_shift);
  115. }
  116. val |= value << rk_cell->div_shift;
  117. clk_div_writel(rk_cell, val);
  118. return RT_EOK;
  119. }
/**
 * Initialize the ops table of a Rockchip half-divider clock cell.
 *
 * Wires mux, gate and divider operations into the cell's ops table
 * depending on which hardware features the cell describes.
 *
 * @param rk_cell the composite Rockchip clock cell to initialize
 */
void rockchip_half_divider_clk_cell_init(struct rockchip_clk_cell *rk_cell)
{
    struct rockchip_half_divider_clk_cell *half_divider_cell = cell_to_rockchip_half_divider_clk_cell(&rk_cell->cell);

    rk_cell->cell.ops = &half_divider_cell->ops;

    /* More than one parent means a mux sits in front of the divider. */
    if (rk_cell->cell.parents_nr > 1)
    {
        rockchip_mux_clk_cell_init(rk_cell);

        half_divider_cell->ops.get_parent = rockchip_mux_clk_ops.get_parent;

        /* A read-only mux gets no set_parent hook. */
        if (!((rk_cell->mux_flags & CLK_MUX_READ_ONLY)))
        {
            half_divider_cell->ops.set_parent = rockchip_mux_clk_ops.set_parent;
        }
    }

    /* A non-negative gate offset means the cell also has a gate. */
    if (rk_cell->gate_offset >= 0)
    {
        half_divider_cell->ops.enable = rockchip_gate_clk_ops.enable;
        half_divider_cell->ops.disable = rockchip_gate_clk_ops.disable;
        half_divider_cell->ops.is_enabled = rockchip_gate_clk_ops.is_enabled;

        /*
         * NOTE(review): the rate ops are installed only when a gate is
         * present, so a gateless half divider would get no recalc_rate /
         * round_rate. The div_width check below re-assigns set_rate only.
         * Confirm whether these three assignments were meant to be keyed
         * on div_width > 0 instead of gate_offset >= 0.
         */
        half_divider_cell->ops.recalc_rate = clk_div_recalc_rate;
        half_divider_cell->ops.round_rate = clk_div_round_rate;
        half_divider_cell->ops.set_rate = clk_div_set_rate;
    }

    if (rk_cell->div_width > 0)
    {
        half_divider_cell->ops.set_rate = clk_div_set_rate;
    }
}