/* xmp-library.h */
  1. /* Customer ID=11656; Build=0x5f626; Copyright (c) 2008-2009 by Tensilica Inc. ALL RIGHTS RESERVED.
  2. These coded instructions, statements, and computer programs are the
  3. copyrighted works and confidential proprietary information of Tensilica Inc.
  4. They may not be modified, copied, reproduced, distributed, or disclosed to
  5. third parties in any manner, medium, or form, in whole or in part, without
  6. the prior written consent of Tensilica Inc. */
  7. #ifndef _XMP_LIBRARY_H
  8. #define _XMP_LIBRARY_H
  9. #ifdef __cplusplus
  10. extern "C" {
  11. #endif
  12. #include <xtensa/config/core-isa.h>
  13. #include <xtensa/config/core.h>
  14. #include <xtensa/tie/xt_core.h>
  15. #if XCHAL_HAVE_RELEASE_SYNC
  16. #include <xtensa/tie/xt_sync.h>
  17. #endif
  18. #if XCHAL_HAVE_EXTERN_REGS
  19. #include <xtensa/xtensa-xer.h>
  20. #endif
  21. #include <stdlib.h>
  22. #include <stdio.h>
  23. #include "xtensa/system/mpsystem.h"
  24. /*
  25. W A R N I N G:
  26. xmp library clients should treat all data structures in this file
  27. as opaque. They are only public to enable users to declare them
  28. statically.
  29. */
  30. /* -------------------------------------------------------------------------
  31. When using XMP on cache-incoherent systems, these macros are helpful
  32. to ensure that you are not reading stale data, and to ensure that
  33. the data you write makes it all the way back to main memory.
  34. */
  35. #if !XCHAL_DCACHE_IS_COHERENT
  36. #define XMP_WRITE_BACK_ELEMENT(x) xthal_dcache_region_writeback((void *)x, sizeof(*x))
  37. #define XMP_INVALIDATE_ELEMENT(x) xthal_dcache_region_invalidate((void *)x, sizeof(*x))
  38. #define XMP_WRITE_BACK_INVALIDATE_ELEMENT(x) xthal_dcache_region_writeback_inv((void *)x, sizeof(*x))
  39. #define XMP_WRITE_BACK_ARRAY(x) xthal_dcache_region_writeback((void *)x, sizeof(x))
  40. #define XMP_INVALIDATE_ARRAY(x) xthal_dcache_region_invalidate((void *)x, sizeof(x))
  41. #define XMP_WRITE_BACK_INVALIDATE_ARRAY(x) xthal_dcache_region_writeback_inv((void *)x, sizeof(x))
  42. #define XMP_WRITE_BACK_ARRAY_ELEMENTS(x, num_elements) xthal_dcache_region_writeback((void *)x, sizeof(*x) * num_elements)
  43. #define XMP_INVALIDATE_ARRAY_ELEMENTS(x, num_elements) xthal_dcache_region_invalidate((void *)x, sizeof(*x) * num_elements)
  44. #define XMP_WRITE_BACK_INVALIDATE_ARRAY_ELEMENTS(x, num_elements) xthal_dcache_region_writeback_inv((void *)x, sizeof(*x) * num_elements)
  45. #else
  46. #define XMP_WRITE_BACK_ELEMENT(x)
  47. #define XMP_INVALIDATE_ELEMENT(x)
  48. #define XMP_WRITE_BACK_INVALIDATE_ELEMENT(x)
  49. #define XMP_WRITE_BACK_ARRAY(x)
  50. #define XMP_INVALIDATE_ARRAY(x)
  51. #define XMP_WRITE_BACK_INVALIDATE_ARRAY(x)
  52. #define XMP_WRITE_BACK_ARRAY_ELEMENTS(x, num_elements)
  53. #define XMP_INVALIDATE_ARRAY_ELEMENTS(x, num_elements)
  54. #define XMP_WRITE_BACK_INVALIDATE_ARRAY_ELEMENTS(x, num_elements)
  55. #endif
  56. /* -------------------------------------------------------------------------
  57. Initialization, error codes, constants and house-keeping
  58. Every core should call xmp_init with the number of cores in the
  59. system.
  60. xmp_init should be called before you use any global synchronization
  61. primitive or shared data.
  62. Further, before you use a dynamically allocated synchronization
  63. primitives, you need to both initialize it by calling the
  64. xmp_*_init function, and you need to have called xmp_init, which
  65. sets up interrupt handlers and interrupt routing.
  66. The second parameter sets the interprocessor interrupt
  67. routing. Passing zero instructs the library to use the default
  68. routing, which will be suitable for most users.
  69. */
  70. extern void xmp_init (int num_cores, unsigned int interrupt_routing);
  71. /* If you want finer-grained control than that provided by xmp_init,
  72. you can the functions below individually--however, this is more
  73. inconvenient and requires greater understanding of the library's
  74. internals. Don't use them directly unless you have a good reason.
  75. */
  76. extern void xmp_unpack_shared (void);
  77. extern void xmp_route_interrupts (unsigned int routing);
  78. #if XCHAL_HAVE_MP_INTERRUPTS
  79. extern void xmp_enable_ipi_interrupts (void);
  80. /* Turn off certain things enabled by xmp_init */
  81. extern void xmp_disable_ipi_interrupts (void);
  82. #endif
  83. extern void xmp_end (void);
  84. /* Only valid after xmp_init. */
  85. extern int xmp_num_cores (void);
  86. /* How many cycles should a core wait before rechecking a
  87. synchronization variable? Higher values will reduce memory
  88. transactions, but will also result in higher latency in returning
  89. from synchronization.
  90. */
  91. extern void xmp_spin_wait_set_cycles (unsigned int limit);
  92. /* If you would prefer to provide your own spin wait function,
  93. to go to sleep, etc. Declare a function of this type, then call
  94. this function. */
  95. typedef void (*xmp_spin_wait_function_t)(void);
  96. extern void xmp_spin_wait_set_function (xmp_spin_wait_function_t func);
  97. extern void xmp_spin(void);
  98. #define XMP_NO_OWNER 0x07
  99. #define XMP_MUTEX_DESTROYED 0xFE
  100. #define XMP_ERROR_FATAL 0xFD
  101. #define XMP_MAX_CORES 0x4
/* xmp_prid - Return this core's processor ID.
   Reads the PRID special register where the configuration has one;
   only the low 8 bits are used. Configurations without PRID are
   treated as core 0. */
static inline unsigned int xmp_prid (void)
{
#if XCHAL_HAVE_PRID
  /* Mask to the low byte; upper PRID bits are ignored by this library. */
  return XT_RSR_PRID() & 0xFF;
#else
  return 0;
#endif
}
  110. /* -------------------------------------------------------------------------
  111. Tracing
  112. A core must set a trace_file if it wants any synchronization
  113. tracing to occur. Sharing file descriptors among cores is very
  114. messy, so don't do it. This, unfortunately, means that two cores
  115. contending for a mutex are not able to trace to the same file.
  116. Any object (except the atomic integer) can have tracing off or on.
  117. */
  118. extern void xmp_set_trace_file (FILE * file);
  119. extern void xmp_trace (const char * fmt, ...);
  120. /* -------------------------------------------------------------------------
  121. Memory Allocation Functions.
  122. These do what you would expect, only from shared memory instead of
  123. private memory.
  124. */
  125. #if XCHAL_DCACHE_IS_COHERENT
  126. extern void * xmp_malloc (size_t size);
  127. extern void * xmp_calloc (size_t nmemb, size_t size);
  128. extern void xmp_free (void * ptr);
  129. #endif
  130. extern void * xmp_sbrk(int size);
  131. /* -------------------------------------------------------------------------
  132. xmp_atomic_int_t
  133. The most basic synchronization primitive in the xmp library.
  134. Atomic ints are sharable among processors, and even interrupt
  135. levels on the same processor. However, their semantics are fairly
  136. rudimentary. All other primitives are based on these, therefore,
  137. changing this implementation affects all other primitives.
  138. */
  139. typedef unsigned int xmp_atomic_int_t;
/* Coherent acquire-load of an atomic int.
   On cache-incoherent configurations the element's cache line is
   invalidated first so the L32AI reads main memory rather than a
   stale cached copy; on coherent configurations the invalidate macro
   expands to nothing. The invalidate must precede the load. */
static inline xmp_atomic_int_t
xmp_coherent_l32ai(xmp_atomic_int_t * address)
{
  XMP_INVALIDATE_ELEMENT (address);
  return XT_L32AI(address, 0);
}
/* Coherent release-store of an atomic int.
   The S32RI is followed, on cache-incoherent configurations, by a
   write-back of the element's cache line so the new value reaches
   main memory where other cores can observe it. The write-back must
   follow the store. */
static inline void
xmp_coherent_s32ri(xmp_atomic_int_t value, xmp_atomic_int_t * address)
{
  XT_S32RI (value, address, 0);
  XMP_WRITE_BACK_ELEMENT (address);
}
  152. #define XMP_ATOMIC_INT_INITIALIZER(value) (value)
  153. /* xmp_atomic_int_init - Initialize an int prior to use
  154. Nonsynchronizing, Nonblocking
  155. Usage:
  156. value - initial value
  157. integer - points to an uninitialized integer
  158. On exit:
  159. initialized to given value
  160. Errors: none
  161. */
/* Initialize *integer to value via a coherent release-store.
   Nonsynchronizing: must not race with concurrent use of the integer. */
static inline void
xmp_atomic_int_init (xmp_atomic_int_t * integer, int value)
{
  xmp_coherent_s32ri (value, integer);
}
  167. /* xmp_atomic_int_value - Read the value
  168. Nonsynchronizing, Nonblocking
  169. Usage:
  170. integer - points to an int
  171. Returns:
  172. the value
  173. */
/* Read the integer's current value with a coherent acquire-load.
   The value may be stale by the time the caller inspects it. */
static inline int
xmp_atomic_int_value (xmp_atomic_int_t * integer)
{
  return xmp_coherent_l32ai (integer);
}
  179. /* xmp_atomic_int_conditional_increment - Conditionally increment integer
  180. Synchronizing, nonblocking
  181. Usage:
  182. integer - points to an initialized integer
  183. amount - how much to increment
  184. prev - believed value of the integer
  185. eg: prev = xmp_atomic_int_value (integer);
  186. success = xmp_atomic_int_increment (integer, 1, prev);
  187. Returns: current value of integer - user should check if it matches
  188. the previous value of the integer. If it does, then the update
  189. was successful.
  190. */
  191. #define USE_ASSEMBLY_IMPLEMENTATION 0
  192. static inline int
  193. xmp_atomic_int_conditional_increment (xmp_atomic_int_t * integer, int amount, int prev)
  194. {
  195. int val;
  196. int saved;
  197. #if USE_ASSEMBLY_IMPLEMENTATION
  198. /* %0 = prev
  199. %1 = saved
  200. %2 = atomic integer pointer
  201. %3 = amount
  202. */
  203. asm volatile ("wsr.scompare1 %0\n"
  204. "mov %1, %0\n"
  205. "add %0, %0, %3\n"
  206. "s32c1i %0, %2, 0\n"
  207. : "+&a" (prev), "+&a"(saved) : "a" (integer), "a" (amount));
  208. return prev;
  209. #else
  210. XT_WSR_SCOMPARE1 (prev);
  211. val = prev + amount;
  212. saved = val;
  213. XT_S32C1I (val, integer, 0);
  214. return val;
  215. #endif
  216. }
  217. /* xmp_atomic_int_increment - Increment integer
  218. Synchronizing, blocking
  219. Usage:
  220. integer - points to an initialized integer
  221. amount - how much to increment
  222. Returns: new value of integer
  223. */
/* Atomically add 'amount' to *integer, retrying the compare-and-swap
   until it succeeds; spins (xmp_spin) between failed attempts.
   Returns the post-increment value this core installed. */
static inline int
xmp_atomic_int_increment (xmp_atomic_int_t * integer, int amount)
{
  int val;
  int saved;
#if USE_ASSEMBLY_IMPLEMENTATION
  /* %0 = val
     %1 = saved
     %2 = atomic integer pointer
     %3 = amount
  */
  asm volatile ("l32ai %0, %2, 0\n"
                "1:\n"
                "wsr.scompare1 %0\n"
                "mov %1, %0\n"
                "add %0, %0, %3\n"
                "s32c1i %0, %2, 0\n"
                "bne %0, %1, 1b\n"
                : "+&a" (val), "+&a"(saved) : "a" (integer), "a" (amount));
#else
  /* Accurately naming "val" is tricky. Sometimes it will be what we
     want to be the new value, but sometimes it contains the value
     that is currently at the location. */
  /* Load location's current value */
  val = xmp_coherent_l32ai (integer);
  do {
    XT_WSR_SCOMPARE1 (val);
    saved = val;
    /* Change it to what we would like to store there--"new_val" */
    val = val + amount;
    /* Possibly store new_val, but reload location's current value no
       matter what. */
    XT_S32C1I (val, integer, 0);
    if (val != saved)
      xmp_spin();
  } while (val != saved);
#endif
  /* On loop exit val == saved == the pre-increment value (S32C1I
     returned the old contents on success), so the value actually
     stored was val + amount. */
  return val + amount;
}
  263. /* xmp_atomic_int_conditional_set - Set the value of an atomic integer
  264. Synchronizing, nonblocking
  265. Usage:
  266. integer - points to an initialized integer
  267. from - believed value of the integer
  268. eg: prev = xmp_atomic_int_value (integer);
  269. success = xmp_atomic_int_conditional_set (integer, 1, prev);
  270. to - new value
  271. Returns: current value of integer - user should check if it matches
  272. the previous value of the integer. If it does, then the update
  273. was successful.
  274. */
/* Single compare-and-swap attempt to set *integer from 'from' to 'to'.
   Returns the value found at the location: equal to 'from' when the
   update took effect, otherwise the location's current value.
   Nonblocking -- the caller decides whether to retry. */
static inline int
xmp_atomic_int_conditional_set (xmp_atomic_int_t * integer, int from, int to)
{
  int val;
  /* Don't even try to update if the integer's value isn't what we
     think it should be. This prevents acquiring this cache-line for
     writing and therefore prevents bus transactions when various
     cores contend. */
  val = xmp_coherent_l32ai(integer);
  if (val == from) {
    XT_WSR_SCOMPARE1 (from);
    val = to;
    /* Possibly store to, but reload location's current value no
       matter what. */
    XT_S32C1I (val, integer, 0);
  }
  return val;
}
  293. /* Macros to implement trivial spin locks. These are very primitive, but
  294. can be useful when you don't need the higher-overhead synchronization.
  295. To use an xmp_atomic_int_t as a trivial spin lock, you should
  296. initialize it to zero first.
  297. */
  298. #define XMP_SIMPLE_SPINLOCK_ACQUIRE(atomic_int_ptr) \
  299. { while (xmp_atomic_int_conditional_set (atomic_int_ptr, 0, xmp_prid() + 1) != 0) \
  300. xmp_spin(); }
  301. #define XMP_SIMPLE_SPINLOCK_RELEASE(atomic_int_ptr) \
  302. { while (xmp_atomic_int_conditional_set (atomic_int_ptr, xmp_prid() + 1, 0) != xmp_prid() + 1) \
  303. xmp_spin(); }
  304. #define XMP_SIMPLE_SPINLOCK_OWNER(atomic_int_ptr) (xmp_atomic_int_value(atomic_int_ptr) - 1)
  305. /* -------------------------------------------------------------------------
  306. xmp_mutex_t - An even higher-level data structure to enforce
  307. mutual exclusion between cores. A core which waits on a mutex might
  308. sleep with a waiti and be interrupted by an interrupt.
  309. Mutexes can be normal or recursive. For a normal mutex, a core
  310. attempting to acquire a mutex it already holds will result in
  311. deadlock. For a recursive mutex, a core will succeed in acquiring a
  312. mutex it already holds, and must release it as many times as it
  313. acquired it.
  314. Mutexes are not sharable between interrupt levels--because
  315. ownership is tracked by core, not thread.
  316. Like all xmp data structures, an object of type xmp_mutex_t
  317. should be treated by the programmer as opaque. They are only
  318. public in this header file to allow them to be declared statically.
  319. For configurations with 16-byte cache lines, this has the most
  320. frequently used and changed data in the first line.
  321. */
  322. #if XCHAL_DCACHE_IS_COHERENT
/* Opaque mutex object -- public only so it can be declared statically.
   Laid out so the most frequently touched fields share the first
   cache line; the whole struct is cache-line aligned. */
typedef struct xmp_mutex_t {
  xmp_atomic_int_t qlock;              /* atomic int guarding the queue fields */
  unsigned int qhead;                  /* queue head index -- presumably next core to be granted; see library impl */
  unsigned int qtail;                  /* queue tail index (initialized to -1 by the initializer macros) */
  unsigned char queue[XMP_MAX_CORES];  /* per-core wait slots, XMP_NO_OWNER when empty */
  unsigned short held;                 /* hold count -- presumably recursion depth; see library impl */
  unsigned char owner;                 /* prid of owning core, or XMP_NO_OWNER */
  unsigned char recursive : 1;         /* XMP_MUTEX_FLAG_RECURSIVE vs XMP_MUTEX_FLAG_NORMAL */
  unsigned char trace : 1;             /* tracing enabled for this mutex */
  unsigned char system : 1;            /* NOTE(review): meaning not shown here -- library-internal flag */
  unsigned char unused : 5;
  const char * name;                   /* optional name for tracing; may be NULL */
} xmp_mutex_t __attribute__ ((aligned (XMP_MAX_DCACHE_LINESIZE)));
  336. #define XMP_MUTEX_INITIALIZER(name) \
  337. { 0, 0, -1, {XMP_NO_OWNER, XMP_NO_OWNER, XMP_NO_OWNER, XMP_NO_OWNER}, \
  338. 0, XMP_NO_OWNER, XMP_MUTEX_FLAG_NORMAL, 0, 0, 0, name }
  339. #define XMP_RECURSIVE_MUTEX_INITIALIZER(name) \
  340. { 0, 0, -1, {XMP_NO_OWNER, XMP_NO_OWNER, XMP_NO_OWNER, XMP_NO_OWNER}, \
  341. 0, XMP_NO_OWNER, XMP_MUTEX_FLAG_RECURSIVE, 0, 0, 0, name }
  342. #define XMP_MUTEX_FLAG_NORMAL 0
  343. #define XMP_MUTEX_FLAG_RECURSIVE 1
  344. #define XMP_MUTEX_ACQUIRE_FAILED -1
  345. #define XMP_MUTEX_ERROR_DESTROY_OWNED -2
  346. #define XMP_MUTEX_ERROR_NOT_OWNED -3
  347. #define XMP_MUTEX_ERROR_ALREADY_OWNED -4
  348. /*
  349. xmp_mutex_init
  350. Nonsynchronizing
  351. Nonblocking
  352. Usage:
  353. mutex - points to an uninitialized mutex
  354. name - name if you want one, NULL if not.
  355. recursive - use recursive semantices
  356. Returns
  357. zero on success (always succeeds)
  358. */
  359. extern int xmp_mutex_init (xmp_mutex_t * mutex,
  360. const char * name,
  361. unsigned int recursive);
  362. /*
  363. int xmp_mutex_destroy (xmp_mutex_t * mutex);
  364. Synchronizing - will fail if mutex is held by anyone -- including
  365. current processor
  366. Nonblocking
  367. Usage:
  368. mutex - points to a mutex
  369. Returns
  370. zero on success
  371. non-zero if mutex is held
  372. */
  373. extern int xmp_mutex_destroy (xmp_mutex_t * mutex);
  374. /*
  375. xmp_mutex_lock -- Synchronizing
  376. xmp_mutex_trylock
  377. Usage:
  378. mutex - points to a mutex
  379. Returns
  380. zero on success
  381. */
  382. extern int xmp_mutex_lock (xmp_mutex_t * mutex);
  383. extern int xmp_mutex_trylock (xmp_mutex_t * mutex);
  384. /*
  385. xmp_mutex_unlock
  386. Synchronizing
  387. Nonblocking
  388. Usage:
  389. mutex - points to a mutex
  390. Returns
  391. zero on success - mutex is released
  392. non-zero on failure - mutex is owned by another core
  393. - prid of processor that does own it
  394. note that by the time this function
  395. returns, the owner of the core may
  396. have changed.
  397. */
  398. extern int xmp_mutex_unlock (xmp_mutex_t * mutex);
  399. /*
  400. xmp_mutex_name
  401. Nonsynchronizing
  402. Nonblocking
  403. Usage:
  404. mutex - points to a mutex
  405. Returns the name of the given mutex, which may be NULL.
  406. */
  407. const char * xmp_mutex_name (const xmp_mutex_t * mutex);
  408. /*
  409. xmp_mutex_trace_on
  410. xmp_mutex_trace_off
  411. Nonsynchronizing
  412. Nonblocking
  413. Turn off and on tracing for the mutex.
  414. These functions are only present in the debug version of the library.
  415. */
  416. extern void xmp_mutex_trace_on (xmp_mutex_t * mutex);
  417. extern void xmp_mutex_trace_off (xmp_mutex_t * mutex);
  418. /* -------------------------------------------------------------------------
  419. xmp_condition_t
  420. Condition Variables following Mesa semantics.
  421. Condition variables are not sharable among interrupt levels.
  422. */
/* Opaque condition-variable object (Mesa semantics) -- public only so
   it can be declared statically. Not sharable among interrupt levels. */
typedef struct xmp_condition_t {
  unsigned int qhead;                  /* queue head index -- see library impl */
  unsigned int qtail;                  /* queue tail index (initialized to -1 by the initializer macro) */
  unsigned char queue[XMP_MAX_CORES];  /* per-core wait slots, XMP_NO_OWNER when empty */
  unsigned int waiting[XMP_MAX_CORES]; /* per-core wait state -- see library impl */
  unsigned char trace : 1;             /* tracing enabled for this condition */
  unsigned char unused : 7;
  const char * name;                   /* optional name for tracing; may be NULL */
} xmp_condition_t __attribute__ ((aligned (XMP_MAX_DCACHE_LINESIZE)));
  432. #define XMP_CONDITION_INITIALIZER(name) \
  433. { 0, -1, {XMP_NO_OWNER, XMP_NO_OWNER, XMP_NO_OWNER, XMP_NO_OWNER}, \
  434. {0, 0, 0, 0}, 0, 0, name}
  435. /* xmp_condition_init - Initialize a condition variable
  436. Nonsynchronizing, Nonblocking
  437. Usage:
  438. condition - pointer to an xmp_condition_t
  439. On exit:
  440. condition initialized
  441. Errors: none
  442. */
  443. extern int xmp_condition_init (xmp_condition_t * condition,
  444. const char * name);
  445. extern int xmp_condition_destroy (xmp_condition_t * condition);
  446. /* xmp_condition_wait - Wait for a condition variable
  447. Synchronizing, blocking
  448. Usage:
  449. condition - pointer to an xmp_condition_t
  450. mutex - pointer to an xmp_mutex_t already acquired by the calling
  451. process
  452. Errors: if the mutex isn't held by this core
  453. */
  454. extern int xmp_condition_wait (xmp_condition_t * condition,
  455. xmp_mutex_t * mutex);
  456. /* xmp_condition_signal
  457. - Signal the first (if any) core waiting on a condition variable
  458. You must hold the mutex you passed to xmp_condition_wait before
  459. calling this function.
  460. Synchronizing, nonblocking
  461. Usage:
  462. condition - pointer to an xmp_condition_t
  463. Errors: none
  464. */
  465. extern int xmp_condition_signal (xmp_condition_t * condition);
  466. /* xmp_condition_broadcast
  467. - Signal all cores waiting on a condition variable
  468. Synchronizing, nonblocking
  469. You must hold the mutex you passed to xmp_condition_wait before
  470. calling this function.
  471. Usage:
  472. condition - pointer to an xmp_condition_t
  473. Errors: none
  474. */
  475. extern int xmp_condition_broadcast (xmp_condition_t * condition);
/* Return the condition variable's name; may be NULL if none was given. */
static inline const char * xmp_condition_name (const xmp_condition_t * condition)
{
  return condition->name;
}
  480. /*
  481. xmp_condition_trace_on
  482. xmp_condition_trace_off
  483. Nonsynchronizing
  484. Nonblocking
  485. Turn off and on statistics and tracing for the condition. For
  486. tracing you must also set a trace file for the core.
  487. These functions are only present in the debug-version of the library.
  488. */
  489. extern void xmp_condition_trace_on (xmp_condition_t * condition);
  490. extern void xmp_condition_trace_off (xmp_condition_t * condition);
  491. #endif /* XCHAL_DCACHE_IS_COHERENT */
  492. /* -------------------------------------------------------------------------
  493. xmp_barrier_t
  494. Classic barriers that stop any core from continuing until a
  495. specified number of cores reach that point. Once the barrier allows
  496. cores through, the barrier is reset and will stop cores from
  497. progressing again.
  498. Barriers are not sharable among interrupt levels.
  499. */
/* Opaque barrier object -- public only so it can be declared statically.
   Resets itself after releasing the waiting cores. Not sharable among
   interrupt levels. */
typedef struct xmp_barrier_t
{
  xmp_atomic_int_t count;      /* arrival counter -- see library impl */
  xmp_atomic_int_t state;      /* barrier generation/state -- see library impl */
  xmp_atomic_int_t sleeping;   /* cores sleeping on the barrier -- see library impl */
  unsigned short num_cores;    /* arrivals required before cores are released */
  unsigned short trace : 1;    /* tracing enabled for this barrier */
  unsigned short system : 1;   /* NOTE(review): meaning not shown here -- library-internal flag */
  const char * name;           /* optional name for tracing; may be NULL */
} xmp_barrier_t __attribute__ ((aligned (XMP_MAX_DCACHE_LINESIZE)));
  510. #define XMP_BARRIER_INITIALIZER(number, name) \
  511. { 0, 0, 0, number, 0, 0, name }
  512. /* xmp_barrier_init - Initialize a barrier
  513. Nonsynchronizing, Nonblocking
  514. Usage:
  515. barrier - pointer to an xmp_barrier_t
  516. num_cores - number of cores needed to arrive at the
  517. barrier before any are allowed through
  518. On exit:
  519. barrier initialized
  520. Always returns zero.
  521. Errors: none
  522. */
  523. extern int xmp_barrier_init (xmp_barrier_t * barrier, int num_cores,
  524. const char * name);
  525. /* xmp_barrier_wait - Wait on a barrier
  526. Nonsynchronizing, Nonblocking
  527. Usage:
  528. barrier - pointer to an xmp_barrier_t
  529. On exit:
  530. Enough cores (as determined at the barrier's initialization)
  531. have reached the barrier.
  532. Errors: none
  533. */
  534. extern int xmp_barrier_wait (xmp_barrier_t * barrier);
/* Return the barrier's name; may be NULL if none was given. */
static inline const char * xmp_barrier_name (const xmp_barrier_t * barrier)
{
  return barrier->name;
}
  539. /*
  540. xmp_barrier_trace_on
  541. xmp_barrier_trace_off
  542. Nonsynchronizing
  543. Nonblocking
  544. Turn on and off tracing for the barrier. For
  545. tracing you must also set a trace file for the core.
  546. These functions are only present in the debug-version of the library.
  547. */
  548. extern void xmp_barrier_trace_on (xmp_barrier_t * barrier);
  549. extern void xmp_barrier_trace_off (xmp_barrier_t * barrier);
  550. /* -------------------------------------------------------------------------
  551. Portions of the library that are internal, but belong here for
  552. convenience.
  553. */
  554. extern xmp_atomic_int_t _ResetSync;
/* xmp_initial_sync - Startup rendezvous on the shared _ResetSync
   counter: each core atomically increments it, then spins until all
   num_cores cores have arrived. The counter is not reset here, so
   this acts as a one-shot barrier. */
static inline void
xmp_initial_sync (int num_cores)
{
  xmp_atomic_int_increment (&_ResetSync, 1);
  while (xmp_coherent_l32ai (&_ResetSync) != num_cores)
    xmp_spin ();
}
  562. #ifdef __cplusplus
  563. }
  564. #endif
  565. #endif /* _XMP_LIBRARY_H */