// -*- C++ -*- header.

// Copyright (C) 2008-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_base.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */

  /// Enumeration for memory_order
#if __cplusplus > 201703L
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;
#endif

  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) & int(__mod));
  }
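
  // The modifier bits above are OR'ed into a memory_order to request
  // transactional lock elision (Intel TSX HLE) from the atomic builtins
  // on targets that support it.  A minimal sketch of the intended use,
  // assuming an HLE-capable x86 target (illustrative only; the
  // __memory_order_hle_* values are implementation details, not part of
  // the standard API):
  //
  //   std::atomic<int> lock_word{0};
  //   while (lock_word.exchange(1, std::memory_order_acquire
  //                                | __memory_order_hle_acquire))
  //     ;   // spin; XACQUIRE lets the CPU elide the lock
  //   // ... critical section ...
  //   lock_word.store(0, std::memory_order_release
  //                      | __memory_order_hle_release);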

  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }
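
  // The single-order compare_exchange_{weak,strong} overloads below use
  // this to derive the failure order from the success order; for example:
  //
  //   __cmpexch_failure_order(memory_order_acq_rel) == memory_order_acquire
  //   __cmpexch_failure_order(memory_order_release) == memory_order_relaxed
  //   __cmpexch_failure_order(memory_order_seq_cst) == memory_order_seq_cst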

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }
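
  // Sketch of kill_dependency in user code (illustrative only, assuming
  // ptr is a std::atomic<int*>): it returns its argument while
  // terminating a memory_order_consume dependency chain, so later
  // accesses need not be dependency-ordered:
  //
  //   int* p = ptr.load(std::memory_order_consume);
  //   int a = *p;                        // carries a dependency on the load
  //   int b = *std::kill_dependency(p);  // dependency chain ends here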

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  /**
   *  @brief Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
  */
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i;
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
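
  // Usage sketch (illustrative, not part of this header): atomic_flag is
  // the only type the standard guarantees to be lock-free, which makes
  // it the canonical building block for a spin lock:
  //
  //   std::atomic_flag flag = ATOMIC_FLAG_INIT;
  //
  //   void lock()
  //   {
  //     while (flag.test_and_set(std::memory_order_acquire))
  //       { }   // spin until the previous value was clear
  //   }
  //
  //   void unlock()
  //   { flag.clear(std::memory_order_release); }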

  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char8_t  char8_t
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i;

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
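
  // Usage sketch (illustrative): the atomic integral typedefs in
  // <atomic> are built on this base, so e.g. a statistics counter that
  // needs atomicity but no ordering can use relaxed operations:
  //
  //   std::atomic<unsigned long> hits{0};
  //   hits.fetch_add(1, std::memory_order_relaxed);  // atomic, unordered
  //   unsigned long total = hits.load();             // seq_cst by default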

  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p;

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
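
  // Usage sketch (illustrative): _M_type_size scales the fetch_add /
  // fetch_sub argument by sizeof(_PTp), so atomic pointers follow
  // ordinary pointer arithmetic:
  //
  //   int buf[4] = {0, 1, 2, 3};
  //   std::atomic<int*> ap{buf};
  //   int* old = ap.fetch_add(2);   // old == buf; ap now points at buf + 2
  //   int x = *ap.load();           // x == 2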

  // @} group atomics

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif