
/* Copyright 2018 Canaan Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef _BSP_ATOMIC_H
#define _BSP_ATOMIC_H

#ifdef __cplusplus
extern "C" {
#endif

#define SPINLOCK_INIT \
    {                 \
        0             \
    }

#define CORELOCK_INIT          \
    {                          \
        .lock = SPINLOCK_INIT, \
        .count = 0,            \
        .core = -1             \
    }
/* Definition of memory barrier macro */
#define mb()                                    \
    {                                           \
        asm volatile("fence" ::: "memory");     \
    }
#define atomic_set(ptr, val) (*(volatile typeof(*(ptr)) *)(ptr) = val)
#define atomic_read(ptr) (*(volatile typeof(*(ptr)) *)(ptr))

#ifndef __riscv_atomic
#error "atomic extension is required."
#endif

#define atomic_add(ptr, inc) __sync_fetch_and_add(ptr, inc)
#define atomic_or(ptr, inc) __sync_fetch_and_or(ptr, inc)
#define atomic_swap(ptr, swp) __sync_lock_test_and_set(ptr, swp)
#define atomic_cas(ptr, cmp, swp) __sync_val_compare_and_swap(ptr, cmp, swp)
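
/*
 * Illustrative sketch (not part of the SDK API): atomic_cas() returns the
 * value that was in *ptr before the exchange, so a lock-free update can loop
 * until the compare-and-swap succeeds. The names below are hypothetical.
 *
 *     static int shared_max = 0;
 *
 *     static inline void update_max(int sample)
 *     {
 *         int old = atomic_read(&shared_max);
 *         while(sample > old)
 *         {
 *             int seen = atomic_cas(&shared_max, old, sample);
 *             if(seen == old)
 *                 break;      // CAS succeeded, sample is the new maximum
 *             old = seen;     // lost the race, retry against the fresh value
 *         }
 *     }
 */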

typedef struct _spinlock
{
    int lock;
} spinlock_t;

typedef struct _semaphore
{
    spinlock_t lock;
    int count;
    int waiting;
} semaphore_t;

typedef struct _corelock
{
    spinlock_t lock;
    int count;
    int core;
} corelock_t;

static inline int spinlock_trylock(spinlock_t *lock)
{
    int res = atomic_swap(&lock->lock, -1);
    /* Use memory barrier to keep coherency */
    mb();
    return res;
}

static inline void spinlock_lock(spinlock_t *lock)
{
    while(spinlock_trylock(lock))
        ;
}

static inline void spinlock_unlock(spinlock_t *lock)
{
    /* Use memory barrier to keep coherency */
    mb();
    atomic_set(&lock->lock, 0);
    asm volatile("nop");
}
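
/*
 * Illustrative sketch (not part of the SDK API): a spinlock guarding a counter
 * shared by both cores. spinlock_trylock() returns 0 on success and nonzero if
 * the lock is already held. The names below are hypothetical.
 *
 *     static spinlock_t counter_lock = SPINLOCK_INIT;
 *     static int shared_counter = 0;
 *
 *     static inline void counter_increment(void)
 *     {
 *         spinlock_lock(&counter_lock);
 *         shared_counter++;
 *         spinlock_unlock(&counter_lock);
 *     }
 */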

static inline void semaphore_signal(semaphore_t *semaphore, int i)
{
    spinlock_lock(&(semaphore->lock));
    semaphore->count += i;
    spinlock_unlock(&(semaphore->lock));
}

static inline void semaphore_wait(semaphore_t *semaphore, int i)
{
    atomic_add(&(semaphore->waiting), 1);
    while(1)
    {
        spinlock_lock(&(semaphore->lock));
        if(semaphore->count >= i)
        {
            semaphore->count -= i;
            atomic_add(&(semaphore->waiting), -1);
            spinlock_unlock(&(semaphore->lock));
            break;
        }
        spinlock_unlock(&(semaphore->lock));
    }
}

static inline int semaphore_count(semaphore_t *semaphore)
{
    int res = 0;
    spinlock_lock(&(semaphore->lock));
    res = semaphore->count;
    spinlock_unlock(&(semaphore->lock));
    return res;
}

static inline int semaphore_waiting(semaphore_t *semaphore)
{
    return atomic_read(&(semaphore->waiting));
}
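
/*
 * Illustrative sketch (not part of the SDK API): a counting semaphore handing
 * work items from one core to the other. semaphore_wait() busy-waits until at
 * least `i` units are available. The names below are hypothetical; the brace
 * initializer is an assumption since the header defines no SEMAPHORE_INIT.
 *
 *     static semaphore_t work_sem = {SPINLOCK_INIT, 0, 0};
 *
 *     // Producer (e.g. core 0): publish one work item
 *     //     semaphore_signal(&work_sem, 1);
 *
 *     // Consumer (e.g. core 1): spin until one item is available
 *     //     semaphore_wait(&work_sem, 1);
 */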

static inline int corelock_trylock(corelock_t *lock)
{
    int res = 0;
    unsigned long core;

    /* Read the current core (hart) id */
    asm volatile("csrr %0, mhartid;"
                 : "=r"(core));
    if(spinlock_trylock(&lock->lock))
    {
        return -1;
    }

    if(lock->count == 0)
    {
        /* First time get lock */
        lock->count++;
        lock->core = core;
        res = 0;
    } else if(lock->core == core)
    {
        /* Same core get lock */
        lock->count++;
        res = 0;
    } else
    {
        /* Different core get lock */
        res = -1;
    }
    spinlock_unlock(&lock->lock);
    return res;
}

static inline void corelock_lock(corelock_t *lock)
{
    unsigned long core;

    /* Read the current core (hart) id */
    asm volatile("csrr %0, mhartid;"
                 : "=r"(core));
    spinlock_lock(&lock->lock);

    if(lock->count == 0)
    {
        /* First time get lock */
        lock->count++;
        lock->core = core;
    } else if(lock->core == core)
    {
        /* Same core get lock */
        lock->count++;
    } else
    {
        /* Different core holds the lock: release the guard, spin until the
         * owner drops it, then retry */
        spinlock_unlock(&lock->lock);
        do
        {
            while(atomic_read(&lock->count))
                ;
        } while(corelock_trylock(lock));
        return;
    }
    spinlock_unlock(&lock->lock);
}

static inline void corelock_unlock(corelock_t *lock)
{
    unsigned long core;

    /* Read the current core (hart) id */
    asm volatile("csrr %0, mhartid;"
                 : "=r"(core));
    spinlock_lock(&lock->lock);

    if(lock->core == core)
    {
        /* Same core release lock */
        lock->count--;
        if(lock->count <= 0)
        {
            lock->core = -1;
            lock->count = 0;
        }
    } else
    {
        /* A core that does not own the lock tried to release it: drop the
         * guard and abort via the exit system call (a7 = 93 is SYS_exit in
         * the RISC-V syscall ABI) */
        spinlock_unlock(&lock->lock);
        register unsigned long a7 asm("a7") = 93;
        register unsigned long a0 asm("a0") = 0;
        register unsigned long a1 asm("a1") = 0;
        register unsigned long a2 asm("a2") = 0;
        asm volatile("scall"
                     : "+r"(a0)
                     : "r"(a1), "r"(a2), "r"(a7));
    }
    spinlock_unlock(&lock->lock);
}
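
/*
 * Illustrative sketch (not part of the SDK API): a corelock is a recursive,
 * per-core lock, so the owning core may take it again while it already holds
 * it, while the other core spins. The names below are hypothetical.
 *
 *     static corelock_t uart_lock = CORELOCK_INIT;
 *
 *     static inline void uart_log(const char *msg)
 *     {
 *         corelock_lock(&uart_lock);    // safe even if this core holds it already
 *         // ... write msg to the shared UART ...
 *         corelock_unlock(&uart_lock);  // each lock needs a matching unlock
 *     }
 */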

#ifdef __cplusplus
}
#endif

#endif /* _BSP_ATOMIC_H */