/* vdso_sys.c */
  1. /*
  2. * Copyright (c) 2006-2025 RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2025-04-22 ScuDays Add VDSO functionality under the riscv64 architecture.
  9. * 2025-05-10 Bernard Move __arch_get_hw_frq() to vdso_sys.c as a weak function.
  10. */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#include <vdso_sys.h>
  16. #ifndef rt_vdso_cycles_ready
  17. static inline bool rt_vdso_cycles_ready(uint64_t cycles)
  18. {
  19. return true;
  20. }
  21. #endif
  22. #ifndef rt_vdso_get_ns
  23. /* Implement as a weak function because there is no CPU cycle for RISCV */
  24. __attribute__((weak)) uint64_t __arch_get_hw_frq()
  25. {
  26. return 10000000;
  27. }
  28. static inline uint64_t rt_vdso_get_ns(uint64_t cycles, uint64_t last)
  29. {
  30. return (cycles - last) * NSEC_PER_SEC / __arch_get_hw_frq();
  31. }
  32. #endif
  33. static int
  34. __rt_vdso_getcoarse(struct timespec *ts, clockid_t clock, const struct vdso_data *vdns)
  35. {
  36. const struct vdso_data *vd;
  37. const struct timespec *vdso_ts;
  38. uint32_t seq;
  39. uint64_t sec, last, ns, cycles;
  40. if (clock != CLOCK_MONOTONIC_RAW)
  41. vd = &vdns[CS_HRES_COARSE];
  42. else
  43. vd = &vdns[CS_RAW];
  44. vdso_ts = &vd->basetime[clock];
  45. do {
  46. seq = rt_vdso_read_begin(vd);
  47. cycles = __arch_get_hw_counter();
  48. if (unlikely(!rt_vdso_cycles_ready(cycles)))
  49. return -1;
  50. ns = vdso_ts->tv_nsec;
  51. last = vd->cycle_last;
  52. ns += rt_vdso_get_ns(cycles, last);
  53. sec = vdso_ts->tv_sec;
  54. } while (unlikely(rt_vdso_read_retry(vd, seq)));
  55. ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
  56. ts->tv_nsec = ns;
  57. return 0;
  58. }
  59. static inline int
  60. __vdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
  61. struct timespec *ts)
  62. {
  63. u_int32_t msk;
  64. if (unlikely((u_int32_t)clock >= MAX_CLOCKS))
  65. return -1;
  66. msk = 1U << clock;
  67. if (likely(msk & VDSO_REALTIME))
  68. return __rt_vdso_getcoarse(ts, CLOCK_REALTIME, vd);
  69. else if (msk & VDSO_MONOTIME)
  70. return __rt_vdso_getcoarse(ts, CLOCK_MONOTONIC, vd);
  71. else
  72. return ENOENT;
  73. }
  74. static __maybe_unused int
  75. rt_vdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
  76. struct timespec *ts)
  77. {
  78. int ret = 0;
  79. ret = __vdso_clock_gettime_common(vd, clock, ts);
  80. return ret;
  81. }
  82. int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
  83. {
  84. return rt_vdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
  85. }