atomic_base.h

// -*- C++ -*- header.

// Copyright (C) 2008-2021 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_base.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>
#include <bits/move.h>

#if __cplusplus > 201703L
#include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */

  /// Enumeration for memory_order
#if __cplusplus > 201703L
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;
#endif
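
  // Illustrative usage sketch (not part of this header): the memory_order
  // constants above are passed to std::atomic operations to request an
  // ordering weaker than the seq_cst default.  A minimal release/acquire
  // pairing, assuming a user translation unit that includes <atomic>
  // (names `payload`, `ready`, `producer`, `consumer` are hypothetical):
  //
  // @code
  // #include <atomic>
  //
  // int payload = 0;
  // std::atomic<int> ready{0};
  //
  // void producer()
  // {
  //   payload = 42;                               // plain store
  //   ready.store(1, std::memory_order_release);  // publish
  // }
  //
  // void consumer()
  // {
  //   while (ready.load(std::memory_order_acquire) == 0)
  //     { }                                       // spin until published
  //   // payload is guaranteed to be 42 here (release/acquire pairing).
  // }
  // @endcode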
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) & int(__mod));
  }

  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }
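
  // Illustrative sketch (not part of this header): the single-order
  // compare_exchange_* overloads below use __cmpexch_failure_order to
  // derive their failure ordering from the success ordering, preserving
  // any HLE modifier bits.  The mapping could be checked in a user
  // translation unit (the double-underscore names are internal, so this
  // is for illustration only):
  //
  // @code
  // static_assert(std::__cmpexch_failure_order(std::memory_order_acq_rel)
  //               == std::memory_order_acquire);
  // static_assert(std::__cmpexch_failure_order(std::memory_order_release)
  //               == std::memory_order_relaxed);
  // static_assert(std::__cmpexch_failure_order(std::memory_order_seq_cst)
  //               == std::memory_order_seq_cst);
  // @endcode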
  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }
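
  // Illustrative usage sketch (not part of this header): atomic_thread_fence
  // lets relaxed atomic accesses synchronize through an explicit ordering
  // point.  Names `data`, `flag`, `writer`, `reader` are hypothetical:
  //
  // @code
  // int data;
  // std::atomic<int> flag{0};
  //
  // void writer()
  // {
  //   data = 42;                                            // plain store
  //   std::atomic_thread_fence(std::memory_order_release);  // release fence
  //   flag.store(1, std::memory_order_relaxed);
  // }
  //
  // void reader()
  // {
  //   while (flag.load(std::memory_order_relaxed) == 0)
  //     { }
  //   std::atomic_thread_fence(std::memory_order_acquire);  // acquire fence
  //   // data is guaranteed to be 42 here.
  // }
  // @endcode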
  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }
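
  // Illustrative usage sketch (not part of this header): kill_dependency
  // terminates a memory_order_consume dependency chain, so uses of the
  // returned value need not preserve dependency ordering.  The helper
  // below is hypothetical:
  //
  // @code
  // int consume_reader(std::atomic<int*>& ptr)
  // {
  //   int* p = ptr.load(std::memory_order_consume);
  //   // r does not carry a dependency on the consume load.
  //   int r = std::kill_dependency(*p);
  //   return r;
  // }
  // @endcode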
  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

#if __cplusplus <= 201703L
# define _GLIBCXX20_INIT(I)
#else
# define __cpp_lib_atomic_value_initialization 201911L
# define _GLIBCXX20_INIT(I) = I
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1. */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  /**
   *  @brief Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
   */
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

#if __cplusplus > 201703L
#define __cpp_lib_atomic_flag_test 201907L

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

#if __cpp_lib_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old,
         memory_order __m = memory_order_seq_cst) const noexcept
    {
      const __atomic_flag_data_type __v
        = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;
      std::__atomic_wait_address_v(&_M_i, __v,
          [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    // TODO add const volatile overload

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() const noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    // TODO add const volatile overload

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() const noexcept
    { std::__atomic_notify_address(&_M_i, true); }

    // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait
#endif // C++20

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);
      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);
      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
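
  // Illustrative usage sketch (not part of this header): atomic_flag as a
  // minimal spin lock, assuming a user translation unit that includes
  // <atomic>; `lock` and `critical_section` are hypothetical names:
  //
  // @code
  // std::atomic_flag lock = ATOMIC_FLAG_INIT;
  //
  // void critical_section()
  // {
  //   while (lock.test_and_set(std::memory_order_acquire))
  //     { }                                  // spin until the flag was clear
  //   // ... exclusive access here ...
  //   lock.clear(std::memory_order_release); // release the lock
  // }
  // @endcode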
  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char8_t  char8_t
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);
        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);
        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);
        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);
        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 __attribute__ ((__unused__))
          = __m2 & __memory_order_mask;
        memory_order __b1 __attribute__ ((__unused__))
          = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);
        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        memory_order __b2 __attribute__ ((__unused__))
          = __m2 & __memory_order_mask;
        memory_order __b1 __attribute__ ((__unused__))
          = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);
        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 __attribute__ ((__unused__))
          = __m2 & __memory_order_mask;
        memory_order __b1 __attribute__ ((__unused__))
          = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);
        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 __attribute__ ((__unused__))
          = __m2 & __memory_order_mask;
        memory_order __b1 __attribute__ ((__unused__))
          = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);
        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_i, __old,
            [__m, this] { return this->load(__m); });
      }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_i, false); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_i, true); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
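
  // Illustrative usage sketch (not part of this header): std::atomic for
  // integral types is built on __atomic_base, so the members above surface
  // as the familiar interface.  `counter` and `hit` are hypothetical names:
  //
  // @code
  // std::atomic<int> counter{0};
  //
  // void hit()
  // {
  //   counter.fetch_add(1, std::memory_order_relaxed);  // returns old value
  //   ++counter;                                         // seq_cst, returns new value
  //
  //   int expected = counter.load(std::memory_order_relaxed);
  //   // weak CAS retry loop: expected is refreshed on each failure
  //   while (!counter.compare_exchange_weak(expected, expected * 2,
  //                                         std::memory_order_acq_rel,
  //                                         std::memory_order_relaxed))
  //     { }
  // }
  // @endcode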
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);
        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);
        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);
        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);
        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) noexcept
      {
        memory_order __b2 __attribute__ ((__unused__))
          = __m2 & __memory_order_mask;
        memory_order __b1 __attribute__ ((__unused__))
          = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);
        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 __attribute__ ((__unused__))
          = __m2 & __memory_order_mask;
        memory_order __b1 __attribute__ ((__unused__))
          = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);
        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
           memory_order __m = memory_order_seq_cst) noexcept
      {
        std::__atomic_wait_address_v(&_M_p, __old,
            [__m, this]
            { return this->load(__m); });
      }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
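
  // Illustrative usage sketch (not part of this header): the pointer
  // specialization scales fetch_add/fetch_sub by sizeof(_PTp) through
  // _M_type_size, so atomic pointer arithmetic matches ordinary pointer
  // arithmetic.  `buffer` and `cursor` are hypothetical names:
  //
  // @code
  // int buffer[8];
  // std::atomic<int*> cursor{buffer};
  //
  // void advance()
  // {
  //   int* old = cursor.fetch_add(2);  // returns buffer, cursor == buffer + 2
  //   cursor += 3;                     // cursor == buffer + 5
  //   (void)old;
  // }
  // @endcode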
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = remove_volatile_t<_Tp>;

    // As above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_load(__ptr, __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
                            _Val<_Tp> __desired, memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), true,
                                         int(__success), int(__failure));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
                              _Val<_Tp> __desired, memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), false,
                                         int(__success), int(__failure));
      }

#if __cpp_lib_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old,
           memory_order __m = memory_order_seq_cst) noexcept
      {
        std::__atomic_wait_address_v(__ptr, __old,
            [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }

    // TODO add const volatile overload

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }

    // TODO add const volatile overload

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }

    // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __newval;
      }
  } // namespace __atomic_impl
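
  // Illustrative sketch (not part of this header): __fetch_add_flt and the
  // other *_flt helpers above implement floating-point read-modify-write as
  // a compare_exchange_weak retry loop, since there is no fetch_add builtin
  // for those types.  The same pattern written against the public API, with
  // a hypothetical helper name:
  //
  // @code
  // void add_sample(std::atomic<double>& total, double x)
  // {
  //   double old = total.load(std::memory_order_relaxed);
  //   // on failure, compare_exchange_weak refreshes old with the current value
  //   while (!total.compare_exchange_weak(old, old + x,
  //                                       std::memory_order_seq_cst,
  //                                       std::memory_order_relaxed))
  //     { }
  // }
  // @endcode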
  // base class for atomic<floating-point-type>
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      _Fp
      operator=(_Fp __t) volatile noexcept
      {
        this->store(__t);
        return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
        this->store(__t);
        return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
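
  // Illustrative usage sketch (not part of this header): in C++20,
  // std::atomic<float> and std::atomic<double> are built on __atomic_float,
  // so fetch_add/fetch_sub and += / -= are available directly.  `sum` and
  // `accumulate` are hypothetical names:
  //
  // @code
  // std::atomic<double> sum{0.0};
  //
  // void accumulate(double x)
  // {
  //   sum.fetch_add(x);  // a CAS retry loop under the hood (__fetch_add_flt)
  //   sum += x;          // returns the updated value
  // }
  // @endcode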
#undef _GLIBCXX20_INIT

  template<typename _Tp,
           bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;

  // base class for non-integral, non-floating-point, non-pointer types
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
        = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
          ? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

    private:
      _Tp* _M_ptr;
    };
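
  // Illustrative usage sketch (not part of this header): the generic
  // __atomic_ref above backs std::atomic_ref for trivially copyable class
  // types, which only get load/store/exchange/compare_exchange.  The names
  // `Pair`, `bump`, and `shared` are hypothetical, and `shared` is assumed
  // to satisfy required_alignment:
  //
  // @code
  // struct Pair { int x; int y; };  // trivially copyable
  //
  // void bump(Pair& shared)
  // {
  //   std::atomic_ref<Pair> ref(shared);
  //   Pair expected = ref.load();
  //   while (!ref.compare_exchange_weak(expected,
  //                                     Pair{expected.x + 1, expected.y}))
  //     { }
  // }
  // @endcode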
  // base class for atomic_ref<integral-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
	= sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
	return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
      }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_weak(_M_ptr,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_strong(_M_ptr,
						      __expected, __desired,
						      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __order = memory_order_seq_cst)
	const noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __order = memory_order_seq_cst)
	const noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
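
  // A minimal usage sketch for the integral specialisation above, which
  // backs std::atomic_ref<int> and adds the fetch_* and compound-assignment
  // operations.  The names counter, ref and seen are illustrative only;
  // C++20 and <atomic> are assumed, and a plain int already satisfies
  // required_alignment.
  //
  //   int counter = 0;
  //   std::atomic_ref<int> ref(counter);
  //   ref.fetch_add(1, std::memory_order_relaxed);  // counter == 1
  //   ref |= 0x4;                                   // atomic OR-assignment
  //   int seen = ref.load();                        // 5 in a single thread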
  // base class for atomic_ref<floating-point-type>
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Fp
      operator=(_Fp __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
	return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_weak(_M_ptr,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_strong(_M_ptr,
						      __expected, __desired,
						      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
	const noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
	const noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };
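
  // A minimal usage sketch for the floating-point specialisation above.
  // Note the difference visible in the member names: fetch_add returns the
  // value held before the operation (__fetch_add_flt), while += returns the
  // updated value (__add_fetch_flt).  The names total and ref are
  // illustrative only; C++20 and <atomic> are assumed.
  //
  //   double total = 0.0;
  //   std::atomic_ref<double> ref(total);
  //   ref.fetch_add(2.5);        // atomic, returns the previous value (0.0)
  //   ref += 1.5;                // atomic, returns the new value (4.0)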
  // base class for atomic_ref<pointer-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp*
      operator=(_Tp* __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Tp*() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
	return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
      }

      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp*
      exchange(_Tp* __desired,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_weak(_M_ptr,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_strong(_M_ptr,
						      __expected, __desired,
						      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
			    memory_order __order = memory_order_seq_cst)
	const noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
			      memory_order __order = memory_order_seq_cst)
	const noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
	return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
	return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
	return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
	return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
      }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
	static_assert(is_object_v<_Tp>);
	return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };
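
  // A minimal usage sketch for the pointer specialisation above, whose
  // fetch_add/fetch_sub scale the difference by sizeof(_Tp) through
  // _S_type_size, so arithmetic steps in whole elements.  The names data,
  // cursor and ref are illustrative only; C++20 and <atomic> are assumed.
  //
  //   int data[4] = {10, 20, 30, 40};
  //   int* cursor = data;
  //   std::atomic_ref<int*> ref(cursor);
  //   ref.fetch_add(2);          // cursor now points at data[2]
  //   ++ref;                     // cursor now points at data[3]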
#endif // C++2a

  /// @} group atomics

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif