vdso_user_internal.h 3.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115
  1. /*
  2. * Copyright (c) 2006-2026 RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2026-04-21 rcitach init ver.
  9. */
  10. #ifndef RT_VDSO_USER_INTERNAL_H
  11. #define RT_VDSO_USER_INTERNAL_H
  12. #include <errno.h>
  13. #include <stdint.h>
  14. #include <stdbool.h>
  15. #include <time.h>
  16. #include <sys/types.h>
  17. #include <vdso_data_page.h>
  18. #include <vdso_arch.h>
/* Branch-prediction hints for the compiler (GCC/Clang __builtin_expect). */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
/* Compiler-only memory barrier: prevents the compiler from caching or
 * reordering memory accesses across this point. It implies no CPU-level
 * ordering; hardware ordering is provided by rt_vdso_arch_rmb(). */
#ifndef barrier
#define barrier() __asm__ __volatile__("" : : : "memory")
#endif
  24. static inline void rt_vdso_read_once_size(const volatile void *ptr, void *dst, int size)
  25. {
  26. switch (size)
  27. {
  28. case 1:
  29. *(uint8_t *)dst = *(const volatile uint8_t *)ptr;
  30. break;
  31. case 2:
  32. *(uint16_t *)dst = *(const volatile uint16_t *)ptr;
  33. break;
  34. case 4:
  35. *(uint32_t *)dst = *(const volatile uint32_t *)ptr;
  36. break;
  37. case 8:
  38. *(uint64_t *)dst = *(const volatile uint64_t *)ptr;
  39. break;
  40. default:
  41. barrier();
  42. __builtin_memcpy(dst, (const void *)ptr, size);
  43. barrier();
  44. break;
  45. }
  46. }
/*
 * READ_ONCE(x): read x exactly once, without tearing (for sizes up to
 * 8 bytes), via rt_vdso_read_once_size(). The union returns the bytes
 * with the original type of x. Uses GNU statement-expression and
 * typeof extensions (GCC/Clang).
 */
#define READ_ONCE(x) \
({ \
    union \
    { \
        typeof(x) value; \
        char bytes[sizeof(x)]; \
    } once; \
    rt_vdso_read_once_size(&(x), once.bytes, sizeof(x)); \
    once.value; \
})
/* Kernel-maintained vDSO data page(s); the symbol is placed by the vDSO
 * link step. Hidden visibility keeps the reference local to the vDSO
 * object (no GOT/PLT indirection). */
extern const struct rt_vdso_data_page __rt_vdso_data_page[] __attribute__((visibility("hidden")));

/* Return a pointer to the shared vDSO data page mapped into user space. */
static inline const struct rt_vdso_data_page *rt_vdso_get_data_page(void)
{
    return __rt_vdso_data_page;
}
/*
 * Open a seqlock-style read section on the vDSO data page.
 *
 * Spins while the sequence counter is odd (writer update in progress),
 * then issues a read barrier so the data loads that follow cannot be
 * reordered before the sequence load.
 *
 * @param data_page shared vDSO data page
 * @return the (even) sequence value to pass to rt_vdso_data_read_retry()
 */
static inline uint32_t rt_vdso_data_read_begin(const struct rt_vdso_data_page *data_page)
{
    uint32_t seq;
    /* Odd counter means the writer is mid-update; wait it out. */
    while (unlikely((seq = READ_ONCE(data_page->seq_counter)) & 1U))
    {
        rt_vdso_arch_cpu_relax();
    }
    rt_vdso_arch_rmb();
    return seq;
}
/*
 * Close a seqlock-style read section.
 *
 * The read barrier orders the caller's data loads before the final
 * sequence load.
 *
 * @param data_page shared vDSO data page
 * @param start     sequence value returned by rt_vdso_data_read_begin()
 * @return non-zero if the counter changed (data may be torn; retry the
 *         whole read), zero if the read was consistent
 */
static inline uint32_t rt_vdso_data_read_retry(const struct rt_vdso_data_page *data_page,
                                               uint32_t start)
{
    rt_vdso_arch_rmb();
    return READ_ONCE(data_page->seq_counter) != start;
}
  78. static inline uint64_t rt_vdso_counter_delta_to_ns(uint64_t now, uint64_t last,
  79. uint64_t freq)
  80. {
  81. if (freq == 0)
  82. {
  83. return 0;
  84. }
  85. return (now - last) * RT_VDSO_NSEC_PER_SEC / freq;
  86. }
  87. static inline void rt_vdso_timespec_add_nanoseconds(struct timespec *ts, uint64_t ns)
  88. {
  89. ts->tv_sec += (time_t)(ns / RT_VDSO_NSEC_PER_SEC);
  90. ns = (uint64_t)ts->tv_nsec + (ns % RT_VDSO_NSEC_PER_SEC);
  91. if (ns >= RT_VDSO_NSEC_PER_SEC)
  92. {
  93. ts->tv_sec += 1;
  94. ns -= RT_VDSO_NSEC_PER_SEC;
  95. }
  96. ts->tv_nsec = (long)ns;
  97. }
  98. #endif /* RT_VDSO_USER_INTERNAL_H */