atomic_base.h
// -*- C++ -*- header.

// Copyright (C) 2008-2017 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_base.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */

  /// Enumeration for memory_order
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;

  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(__m | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(__m & int(__mod));
  }

  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | (__m & __memory_order_modifier_mask));
  }
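
  // Illustrative note (not part of the original header): the helpers above map
  // a success ordering to a legal failure ordering by dropping any release
  // component while preserving modifier bits, e.g.
  //
  //   __cmpexch_failure_order(memory_order_seq_cst) == memory_order_seq_cst
  //   __cmpexch_failure_order(memory_order_acq_rel) == memory_order_acquire
  //   __cmpexch_failure_order(memory_order_release) == memory_order_relaxed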

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(__m); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(__m); }
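
  // Illustrative sketch (not part of the original header): a release fence
  // before a relaxed store pairs with an acquire fence after a relaxed load
  // that observes the stored value (assuming std::atomic<int> from <atomic>):
  //
  // @code
  //   std::atomic<int> __flag(0);
  //   int __payload;
  //
  //   // producer thread
  //   __payload = 42;
  //   std::atomic_thread_fence(std::memory_order_release);
  //   __flag.store(1, std::memory_order_relaxed);
  //
  //   // consumer thread
  //   while (__flag.load(std::memory_order_relaxed) != 1)
  //     { }
  //   std::atomic_thread_fence(std::memory_order_acquire);
  //   // __payload is now guaranteed to be 42.
  // @endcode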

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }
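
  // Illustrative sketch (not part of the original header): kill_dependency
  // terminates a memory_order_consume dependency chain, so later uses of the
  // returned value are no longer dependency-ordered:
  //
  // @code
  //   std::atomic<int*> __ptr;
  //   int* __p = __ptr.load(std::memory_order_consume);
  //   int __a = *__p;                        // carries a dependency from the load
  //   int __b = *std::kill_dependency(__p);  // dependency chain deliberately ended
  // @endcode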

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;


#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  /**
   *  @brief Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
   */
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i;
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, __m);
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, __m);
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, __m);
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, __m);
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
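
  // Illustrative sketch (not part of the original header): test_and_set and
  // clear are enough to build a minimal spin lock (names are hypothetical):
  //
  // @code
  //   std::atomic_flag __busy = ATOMIC_FLAG_INIT;
  //
  //   void __lock()
  //   { while (__busy.test_and_set(std::memory_order_acquire)) { } }
  //
  //   void __unlock()
  //   { __busy.clear(std::memory_order_release); }
  // @endcode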

  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i;

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); }

      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-__alignof(_M_i)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-__alignof(_M_i)));
      }
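
      // Illustrative note (not part of the original header): negating the
      // alignment yields an address that is aligned to exactly __alignof(_M_i)
      // and no more, so __atomic_is_lock_free can answer for the type rather
      // than for one particular object.  Callers typically just query the
      // member:
      //
      // @code
      //   std::atomic<long> __counter(0);
      //   if (__counter.is_lock_free())
      //     { /* no mutex is used internally for this type */ }
      // @endcode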

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, __m);
      }
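
      // Illustrative sketch (not part of the original header): store and load
      // with explicit orderings implement the usual publication pattern:
      //
      // @code
      //   int __data = 0;
      //   std::atomic<bool> __ready(false);
      //
      //   // writer thread
      //   __data = 42;
      //   __ready.store(true, std::memory_order_release);
      //
      //   // reader thread
      //   if (__ready.load(std::memory_order_acquire))
      //     { /* __data is guaranteed to be 42 here */ }
      // @endcode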

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, __m);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }
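
      // Illustrative sketch (not part of the original header): the canonical
      // use of compare_exchange_weak is a retry loop, which absorbs spurious
      // failures; on failure the expected value is refreshed automatically:
      //
      // @code
      //   std::atomic<int> __counter(0);
      //
      //   void __add_if_below(int __limit)
      //   {
      //     int __old = __counter.load(std::memory_order_relaxed);
      //     while (__old < __limit
      //            && !__counter.compare_exchange_weak(__old, __old + 1,
      //                                                std::memory_order_acq_rel,
      //                                                std::memory_order_relaxed))
      //       { }  // __old now holds the current value; retry
      //   }
      // @endcode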

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, __m); }
    };
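
  // Illustrative sketch (not part of the original header): std::atomic for an
  // integral type derives from __atomic_base, so fetch_* and the compound
  // assignment operators map directly onto the built-ins used above:
  //
  // @code
  //   std::atomic<unsigned> __flags(0);
  //   unsigned __old  = __flags.fetch_or(0x4, std::memory_order_relaxed);
  //   __flags |= 0x2;                           // seq_cst or-and-fetch
  //   unsigned __prev = __flags.fetch_add(1);   // seq_cst by default
  // @endcode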

  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p;

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  memory_order_seq_cst); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  memory_order_seq_cst); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  memory_order_seq_cst); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  memory_order_seq_cst); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  memory_order_seq_cst); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  memory_order_seq_cst); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  memory_order_seq_cst); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  memory_order_seq_cst); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, __m);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), __m); }
    };
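
  // Illustrative sketch (not part of the original header): the pointer
  // specialization scales distances by sizeof(_PTp) via _M_type_size, so
  // arithmetic on std::atomic<T*> advances by whole objects, not bytes:
  //
  // @code
  //   int __buf[8] = { };
  //   std::atomic<int*> __cursor(__buf);
  //   int* __slot = __cursor.fetch_add(2);  // returns __buf; __cursor is __buf + 2
  //   ++__cursor;                           // now __buf + 3
  // @endcode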

  // @} group atomics

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif