/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-09-23     GuEe-GUI     first version
 */

#include <rtthread.h>
#include <cpuport.h>

#define DBG_TAG "rtdm.hwspinlock"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include "hwspinlock_dm.h"
/* Serializes bank registration/unregistration and the per-lock "used" flag. */
static RT_DEFINE_SPINLOCK(hwspinlock_ops_lock);
/* All registered banks, linked through rt_hwspinlock_bank::list. */
static rt_list_t hwspinlock_bank_nodes = RT_LIST_OBJECT_INIT(hwspinlock_bank_nodes);
  18. rt_err_t rt_hwspinlock_bank_register(struct rt_hwspinlock_bank *bank)
  19. {
  20. struct rt_hwspinlock *hwlock;
  21. if (!bank || !bank->ops || bank->locks_nr <= 0 || !bank->dev)
  22. {
  23. return -RT_EINVAL;
  24. }
  25. rt_list_init(&bank->list);
  26. rt_ref_init(&bank->ref);
  27. hwlock = &bank->locks[0];
  28. for (int i = 0; i < bank->locks_nr; ++i, ++hwlock)
  29. {
  30. hwlock->bank = bank;
  31. hwlock->used = RT_FALSE;
  32. rt_spin_lock_init(&hwlock->lock);
  33. }
  34. rt_spin_lock(&hwspinlock_ops_lock);
  35. rt_list_insert_after(&hwspinlock_bank_nodes, &bank->list);
  36. rt_spin_unlock(&hwspinlock_ops_lock);
  37. rt_dm_dev_bind_fwdata(bank->dev, RT_NULL, bank);
  38. return RT_EOK;
  39. }
  40. rt_err_t rt_hwspinlock_bank_unregister(struct rt_hwspinlock_bank *bank)
  41. {
  42. rt_err_t err;
  43. if (!bank)
  44. {
  45. return -RT_EINVAL;
  46. }
  47. rt_spin_lock(&hwspinlock_ops_lock);
  48. if (rt_ref_read(&bank->ref) == 1)
  49. {
  50. rt_list_remove(&bank->list);
  51. rt_dm_dev_unbind_fwdata(bank->dev, RT_NULL);
  52. err = RT_EOK;
  53. }
  54. else
  55. {
  56. err = -RT_EBUSY;
  57. }
  58. rt_spin_unlock(&hwspinlock_ops_lock);
  59. return err;
  60. }
/**
 * Try to take a hardware spinlock once, without spinning.
 *
 * A local (software) spinlock is taken first so only one CPU/thread per side
 * contends on the hardware lock; the hardware trylock op is then attempted.
 * On failure the local lock is released again, so the caller holds nothing.
 *
 * @param hwlock the hardware spinlock to try.
 * @param out_irq_level if non-NULL, interrupts are disabled and the previous
 *        level is stored here; must be passed back to the matching unlock.
 *
 * @return RT_EOK on success (local lock + hardware lock held),
 *         -RT_EINVAL if hwlock is NULL, otherwise the error from the
 *         bank's trylock op (the local lock is released on that path).
 */
rt_err_t rt_hwspin_trylock_raw(struct rt_hwspinlock *hwlock,
        rt_ubase_t *out_irq_level)
{
    rt_err_t err;

    if (!hwlock)
    {
        return -RT_EINVAL;
    }

    /* Take the per-lock software spinlock first, optionally IRQ-safe. */
    if (out_irq_level)
    {
        *out_irq_level = rt_spin_lock_irqsave(&hwlock->lock);
    }
    else
    {
        rt_spin_lock(&hwlock->lock);
    }

    err = hwlock->bank->ops->trylock(hwlock);

    /* Hardware lock not acquired: drop the software lock symmetrically. */
    if (err)
    {
        if (out_irq_level)
        {
            rt_spin_unlock_irqrestore(&hwlock->lock, *out_irq_level);
        }
        else
        {
            rt_spin_unlock(&hwlock->lock);
        }
    }

    /* Memory barrier so accesses in the critical section cannot be
     * reordered before the lock acquisition. */
    rt_hw_dmb();

    return err;
}
  92. rt_err_t rt_hwspin_lock_timeout_raw(struct rt_hwspinlock *hwlock,
  93. rt_uint32_t timeout_ms, rt_ubase_t *out_irq_level)
  94. {
  95. rt_err_t err;
  96. rt_tick_t timeout = rt_tick_get() + rt_tick_from_millisecond(timeout_ms);
  97. for (;;)
  98. {
  99. err = rt_hwspin_trylock_raw(hwlock, out_irq_level);
  100. if (err != -RT_EBUSY)
  101. {
  102. break;
  103. }
  104. if (timeout < rt_tick_get())
  105. {
  106. return -RT_ETIMEOUT;
  107. }
  108. if (hwlock->bank->ops->relax)
  109. {
  110. hwlock->bank->ops->relax(hwlock);
  111. }
  112. }
  113. return err;
  114. }
  115. void rt_hwspin_unlock_raw(struct rt_hwspinlock *hwlock,
  116. rt_ubase_t *out_irq_level)
  117. {
  118. if (!hwlock)
  119. {
  120. return;
  121. }
  122. rt_hw_dmb();
  123. hwlock->bank->ops->unlock(hwlock);
  124. if (out_irq_level)
  125. {
  126. rt_spin_unlock_irqrestore(&hwlock->lock, *out_irq_level);
  127. }
  128. else
  129. {
  130. rt_spin_unlock(&hwlock->lock);
  131. }
  132. }
  133. static struct rt_hwspinlock *hwspinlock_get(struct rt_hwspinlock_bank *bank, int id)
  134. {
  135. struct rt_hwspinlock *hwlock = RT_NULL;
  136. if (bank)
  137. {
  138. int offset = id - bank->base_id;
  139. if (!bank->locks[offset].used)
  140. {
  141. hwlock = &bank->locks[offset];
  142. }
  143. }
  144. else
  145. {
  146. rt_list_for_each_entry(bank, &hwspinlock_bank_nodes, list)
  147. {
  148. hwlock = rt_err_ptr(-RT_EBUSY);
  149. for (int i = 0; i < bank->locks_nr; ++i)
  150. {
  151. if (!bank->locks[i].used)
  152. {
  153. hwlock = &bank->locks[i];
  154. goto _found;
  155. }
  156. }
  157. }
  158. }
  159. _found:
  160. if (!rt_is_err_or_null(hwlock))
  161. {
  162. hwlock->used = RT_TRUE;
  163. rt_ref_get(&hwlock->bank->ref);
  164. }
  165. return hwlock;
  166. }
  167. struct rt_hwspinlock *rt_hwspinlock_get(void)
  168. {
  169. struct rt_hwspinlock *lock;
  170. rt_spin_lock(&hwspinlock_ops_lock);
  171. lock = hwspinlock_get(RT_NULL, -1);
  172. rt_spin_unlock(&hwspinlock_ops_lock);
  173. return lock;
  174. }
  175. struct rt_hwspinlock *rt_hwspinlock_get_by_index(struct rt_device *dev, int index)
  176. {
  177. return rt_ofw_get_hwspinlock_by_index(dev->ofw_node, index);
  178. }
  179. struct rt_hwspinlock *rt_hwspinlock_get_by_name(struct rt_device *dev, const char *name)
  180. {
  181. return rt_ofw_get_hwspinlock_by_name(dev->ofw_node, name);
  182. }
/*
 * rt_ref release callback for a bank's reference count.
 *
 * Reaching zero means rt_hwspinlock_put() was called more times than locks
 * were claimed (the count starts at 1 at registration and only
 * unregistration may retire it), so this is treated as a fatal
 * programming error and trapped with an assert.
 */
static void hwspinlock_release(struct rt_ref *r)
{
    struct rt_hwspinlock_bank *bank = rt_container_of(r, struct rt_hwspinlock_bank, ref);

    LOG_E("%s is release", rt_dm_dev_get_name(bank->dev));
    (void)bank;

    RT_ASSERT(0);
}
  190. void rt_hwspinlock_put(struct rt_hwspinlock *hwlock)
  191. {
  192. if (hwlock)
  193. {
  194. rt_spin_lock(&hwspinlock_ops_lock);
  195. hwlock->used = RT_FALSE;
  196. rt_spin_unlock(&hwspinlock_ops_lock);
  197. rt_ref_put(&hwlock->bank->ref, &hwspinlock_release);
  198. }
  199. }
/**
 * Resolve and claim a hardware spinlock from an OFW "hwlocks" reference.
 *
 * Parses the index-th "hwlocks" phandle (sized by "#hwlock-cells") of 'np',
 * lazily probes the provider if it has no bound fwdata yet, then claims the
 * lock addressed by the phandle's first cell.
 *
 * @param np consumer node carrying the "hwlocks" property.
 * @param index index into the "hwlocks" phandle list; must be >= 0.
 *
 * @return the claimed lock, or an error pointer: -RT_EINVAL on bad
 *         arguments, the phandle-parse error, -RT_ENOSYS when the provider
 *         is not (yet) registered or the cell count is not 1.
 */
struct rt_hwspinlock *rt_ofw_get_hwspinlock_by_index(struct rt_ofw_node *np, int index)
{
    rt_err_t err;
    struct rt_ofw_node *bank_np;
    struct rt_ofw_cell_args args;
    struct rt_hwspinlock *lock;
    struct rt_hwspinlock_bank *bank;

    if (!np || index < 0)
    {
        return rt_err_ptr(-RT_EINVAL);
    }

    err = rt_ofw_parse_phandle_cells(np, "hwlocks", "#hwlock-cells", index, &args);

    if (err)
    {
        return rt_err_ptr(err);
    }

    /* args.data is the provider (bank) node referenced by the phandle. */
    bank_np = args.data;

    /* Provider not bound yet: request it so its driver probes and
     * registers the bank, binding it as the node's fwdata. */
    if (!rt_ofw_data(bank_np))
    {
        rt_platform_ofw_request(bank_np);
    }

    rt_spin_lock(&hwspinlock_ops_lock);

    bank = rt_ofw_data(bank_np);
    /* Drop the node reference taken by the phandle parse. */
    rt_ofw_node_put(bank_np);

    if (!bank || args.args_count != 1)
    {
        lock = rt_err_ptr(-RT_ENOSYS);
    }
    else
    {
        /* args.args[0] is the lock offset within the bank; hwspinlock_get()
         * turns the global id back into that offset. */
        lock = hwspinlock_get(bank, bank->base_id + args.args[0]);
    }

    rt_spin_unlock(&hwspinlock_ops_lock);

    return lock;
}
  235. struct rt_hwspinlock *rt_ofw_get_hwspinlock_by_name(struct rt_ofw_node *np, const char *name)
  236. {
  237. int index;
  238. if (!np || !name)
  239. {
  240. return rt_err_ptr(-RT_EINVAL);
  241. }
  242. index = rt_ofw_prop_index_of_string(np, "hwlock-names", name);
  243. if (index < 0)
  244. {
  245. return rt_err_ptr(index);
  246. }
  247. return rt_ofw_get_hwspinlock_by_index(np, index);
  248. }