// <mutex> -*- C++ -*-

// Copyright (C) 2003-2021 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_MUTEX
#define _GLIBCXX_MUTEX 1

#pragma GCC system_header

#if __cplusplus < 201103L
# include <bits/c++0x_warning.h>
#else

#include <tuple>
#include <chrono>
#include <exception>
#include <type_traits>
#include <system_error>
#include <bits/std_mutex.h>
#include <bits/unique_lock.h>
#if ! _GTHREAD_USE_MUTEX_TIMEDLOCK
# include <condition_variable>
# include <thread>
#endif
#include <ext/atomicity.h>       // __gnu_cxx::__is_single_threaded

#if defined _GLIBCXX_HAS_GTHREADS && ! defined _GLIBCXX_HAVE_TLS
# include <bits/std_function.h>  // std::function
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup mutexes
   * @{
   */

#ifdef _GLIBCXX_HAS_GTHREADS

  // Common base class for std::recursive_mutex and std::recursive_timed_mutex
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t   __native_type;

    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    __native_type  _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    __native_type  _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };

  /// The standard recursive mutex type.
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type*   native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }
  };
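  // A minimal usage sketch for std::recursive_mutex, with hypothetical
  // helper() and caller() functions: the owning thread may acquire the
  // mutex again while it already holds it, provided every lock() is
  // eventually balanced by an unlock().
  //
  //   #include <mutex>
  //
  //   std::recursive_mutex mtx;
  //
  //   void helper()   // may be entered with mtx already held by this thread
  //   {
  //     std::lock_guard<std::recursive_mutex> g(mtx);
  //     // ... touch shared state ...
  //   }
  //
  //   void caller()
  //   {
  //     std::lock_guard<std::recursive_mutex> g(mtx);
  //     helper();     // re-locking from the owning thread does not deadlock
  //   }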
#if _GTHREAD_USE_MUTEX_TIMEDLOCK

  template<typename _Derived>
    class __timed_mutex_impl
    {
    protected:
      template<typename _Rep, typename _Period>
        bool
        _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
        {
#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
          using __clock = chrono::steady_clock;
#else
          using __clock = chrono::system_clock;
#endif

          auto __rt = chrono::duration_cast<__clock::duration>(__rtime);
          if (ratio_greater<__clock::period, _Period>())
            ++__rt;
          return _M_try_lock_until(__clock::now() + __rt);
        }

      template<typename _Duration>
        bool
        _M_try_lock_until(const chrono::time_point<chrono::system_clock,
                                                   _Duration>& __atime)
        {
          auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
          auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

          __gthread_time_t __ts = {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

          return static_cast<_Derived*>(this)->_M_timedlock(__ts);
        }

#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
      template<typename _Duration>
        bool
        _M_try_lock_until(const chrono::time_point<chrono::steady_clock,
                                                   _Duration>& __atime)
        {
          auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
          auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

          __gthread_time_t __ts = {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

          return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC,
                                                            __ts);
        }
#endif

      template<typename _Clock, typename _Duration>
        bool
        _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
        {
#if __cplusplus > 201703L
          static_assert(chrono::is_clock_v<_Clock>);
#endif
          // The user-supplied clock may not tick at the same rate as
          // steady_clock, so we must loop in order to guarantee that
          // the timeout has expired before returning false.
          auto __now = _Clock::now();
          do {
            auto __rtime = __atime - __now;
            if (_M_try_lock_for(__rtime))
              return true;
            __now = _Clock::now();
          } while (__atime > __now);
          return false;
        }
    };

  /// The standard timed mutex type.
  class timed_mutex
  : private __mutex_base, public __timed_mutex_impl<timed_mutex>
  {
  public:
    typedef __native_type*   native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

  private:
    friend class __timed_mutex_impl<timed_mutex>;

    bool
    _M_timedlock(const __gthread_time_t& __ts)
    { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }

#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
    bool
    _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
    { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
#endif
  };
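  // A minimal usage sketch for std::timed_mutex, with a hypothetical
  // try_update() function: try_lock_for() gives up after a relative
  // timeout instead of blocking indefinitely.
  //
  //   #include <chrono>
  //   #include <mutex>
  //
  //   std::timed_mutex tmtx;
  //
  //   bool try_update()
  //   {
  //     std::unique_lock<std::timed_mutex> lk(tmtx,
  //                                           std::chrono::milliseconds(100));
  //     if (!lk.owns_lock())
  //       return false;   // could not acquire the lock within 100ms
  //     // ... update shared state while holding the lock ...
  //     return true;
  //   }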
  /// recursive_timed_mutex
  class recursive_timed_mutex
  : private __recursive_mutex_base,
    public __timed_mutex_impl<recursive_timed_mutex>
  {
  public:
    typedef __native_type*   native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

  private:
    friend class __timed_mutex_impl<recursive_timed_mutex>;

    bool
    _M_timedlock(const __gthread_time_t& __ts)
    { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }

#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
    bool
    _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
    { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
#endif
  };

#else // !_GTHREAD_USE_MUTEX_TIMEDLOCK

  /// timed_mutex
  class timed_mutex
  {
    mutex               _M_mut;
    condition_variable  _M_cv;
    bool                _M_locked = false;

  public:
    timed_mutex() = default;
    ~timed_mutex() { __glibcxx_assert( !_M_locked ); }

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, [&]{ return !_M_locked; });
      _M_locked = true;
    }

    bool
    try_lock()
    {
      lock_guard<mutex> __lk(_M_mut);
      if (_M_locked)
        return false;
      _M_locked = true;
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
          return false;
        _M_locked = true;
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
          return false;
        _M_locked = true;
        return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_locked );
      _M_locked = false;
      _M_cv.notify_one();
    }
  };

  /// recursive_timed_mutex
  class recursive_timed_mutex
  {
    mutex               _M_mut;
    condition_variable  _M_cv;
    thread::id          _M_owner;
    unsigned            _M_count = 0;

    // Predicate type that tests whether the current thread can lock a mutex.
    struct _Can_lock
    {
      // Returns true if the mutex is unlocked or is locked by _M_caller.
      bool
      operator()() const noexcept
      { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }

      const recursive_timed_mutex* _M_mx;
      thread::id _M_caller;
    };

  public:
    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, __can_lock);
      if (_M_count == -1u)
        __throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
      _M_owner = __id;
      ++_M_count;
    }

    bool
    try_lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      lock_guard<mutex> __lk(_M_mut);
      if (!__can_lock())
        return false;
      if (_M_count == -1u)
        return false;
      _M_owner = __id;
      ++_M_count;
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        auto __id = this_thread::get_id();
        _Can_lock __can_lock{this, __id};
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
          return false;
        if (_M_count == -1u)
          return false;
        _M_owner = __id;
        ++_M_count;
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        auto __id = this_thread::get_id();
        _Can_lock __can_lock{this, __id};
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_until(__lk, __atime, __can_lock))
          return false;
        if (_M_count == -1u)
          return false;
        _M_owner = __id;
        ++_M_count;
        return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_owner == this_thread::get_id() );
      __glibcxx_assert( _M_count > 0 );
      if (--_M_count == 0)
        {
          _M_owner = {};
          _M_cv.notify_one();
        }
    }
  };

#endif
#endif // _GLIBCXX_HAS_GTHREADS
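  // A minimal usage sketch for std::recursive_timed_mutex, with a
  // hypothetical with_deadline() function: try_lock_until() waits up to an
  // absolute time point, and the owning thread may lock again recursively
  // as long as each successful lock is matched by an unlock.
  //
  //   #include <chrono>
  //   #include <mutex>
  //
  //   std::recursive_timed_mutex rtmtx;
  //
  //   bool with_deadline()
  //   {
  //     auto deadline = std::chrono::steady_clock::now()
  //                       + std::chrono::seconds(1);
  //     if (!rtmtx.try_lock_until(deadline))
  //       return false;
  //     rtmtx.lock();      // same thread may lock again recursively
  //     // ... critical section ...
  //     rtmtx.unlock();
  //     rtmtx.unlock();    // one unlock per successful lock
  //     return true;
  //   }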
  /// @cond undocumented
  template<typename _Lock>
    inline unique_lock<_Lock>
    __try_to_lock(_Lock& __l)
    { return unique_lock<_Lock>{__l, try_to_lock}; }

  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
        static void
        __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
        {
          __idx = _Idx;
          auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              constexpr bool __cont = _Idx + 2 < sizeof...(_Lock);
              using __try_locker = __try_lock_impl<_Idx + 1, __cont>;
              __try_locker::__do_try_lock(__locks, __idx);
              if (__idx == -1)
                __lock.release();
            }
        }
    };

  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
        static void
        __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
        {
          __idx = _Idx;
          auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __idx = -1;
              __lock.release();
            }
        }
    };
  /// @endcond

  /** @brief Generic try_lock.
   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *          a 0-based index corresponding to the argument that returned false.
   *  @post Either all arguments are locked, or none will be.
   *
   *  Sequentially calls try_lock() on each argument.
   */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      int __idx;
      auto __locks = std::tie(__l1, __l2, __l3...);
      __try_lock_impl<0>::__do_try_lock(__locks, __idx);
      return __idx;
    }
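  // A minimal usage sketch for std::try_lock, with hypothetical mutexes m1
  // and m2: on success it returns -1 and leaves both mutexes locked;
  // otherwise it returns the 0-based index of the argument that could not
  // be locked and releases anything already acquired.
  //
  //   #include <mutex>
  //
  //   std::mutex m1, m2;
  //
  //   bool try_both()
  //   {
  //     if (std::try_lock(m1, m2) == -1)
  //       {
  //         std::lock_guard<std::mutex> g1(m1, std::adopt_lock);
  //         std::lock_guard<std::mutex> g2(m2, std::adopt_lock);
  //         // ... both locks held here ...
  //         return true;
  //       }
  //     return false;
  //   }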
  /** @brief Generic lock.
   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
   *  @throw An exception thrown by an argument's lock() or try_lock() member.
   *  @post All arguments are locked.
   *
   *  All arguments are locked via a sequence of calls to lock(), try_lock()
   *  and unlock().  If the call exits via an exception any locks that were
   *  obtained will be released.
   */
  template<typename _L1, typename _L2, typename... _L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      while (true)
        {
          using __try_locker = __try_lock_impl<0, sizeof...(_L3) != 0>;
          unique_lock<_L1> __first(__l1);
          int __idx;
          auto __locks = std::tie(__l2, __l3...);
          __try_locker::__do_try_lock(__locks, __idx);
          if (__idx == -1)
            {
              __first.release();
              return;
            }
        }
    }
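  // A minimal usage sketch for std::lock, with a hypothetical transfer()
  // function: locking both mutexes through std::lock avoids the deadlock
  // that could occur if two threads locked them one at a time in opposite
  // orders; adopt_lock hands the already-held locks to the guards.
  //
  //   #include <mutex>
  //
  //   struct account { std::mutex m; long balance = 0; };
  //
  //   void transfer(account& from, account& to, long amount)
  //   {
  //     std::lock(from.m, to.m);   // acquires both or neither
  //     std::lock_guard<std::mutex> g1(from.m, std::adopt_lock);
  //     std::lock_guard<std::mutex> g2(to.m, std::adopt_lock);
  //     from.balance -= amount;
  //     to.balance += amount;
  //   }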
#if __cplusplus >= 201703L
#define __cpp_lib_scoped_lock 201703

  /** @brief A scoped lock type for multiple lockable objects.
   *
   *  A scoped_lock controls mutex ownership within a scope, releasing
   *  ownership in the destructor.
   */
  template<typename... _MutexTypes>
    class scoped_lock
    {
    public:
      explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
      { std::lock(__m...); }

      explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
      : _M_devices(std::tie(__m...))
      { } // calling thread owns mutex

      ~scoped_lock()
      { std::apply([](auto&... __m) { (__m.unlock(), ...); }, _M_devices); }

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;

    private:
      tuple<_MutexTypes&...> _M_devices;
    };

  template<>
    class scoped_lock<>
    {
    public:
      explicit scoped_lock() = default;
      explicit scoped_lock(adopt_lock_t) noexcept { }
      ~scoped_lock() = default;

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;
    };

  template<typename _Mutex>
    class scoped_lock<_Mutex>
    {
    public:
      using mutex_type = _Mutex;

      explicit scoped_lock(mutex_type& __m) : _M_device(__m)
      { _M_device.lock(); }

      explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
      : _M_device(__m)
      { } // calling thread owns mutex

      ~scoped_lock()
      { _M_device.unlock(); }

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;

    private:
      mutex_type& _M_device;
    };
#endif // C++17
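  // A minimal C++17 usage sketch for std::scoped_lock, with a hypothetical
  // swap_data() function: the variadic form acquires all mutexes
  // deadlock-free, as if by std::lock, and releases them when the scope
  // ends.
  //
  //   #include <mutex>
  //   #include <vector>
  //
  //   struct bucket { std::mutex m; std::vector<int> data; };
  //
  //   void swap_data(bucket& a, bucket& b)
  //   {
  //     std::scoped_lock lk(a.m, b.m);   // CTAD: scoped_lock<mutex, mutex>
  //     a.data.swap(b.data);
  //   }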
#ifdef _GLIBCXX_HAS_GTHREADS

  /// Flag type used by std::call_once
  struct once_flag
  {
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // For gthreads targets a pthread_once_t is used with pthread_once, but
    // for most targets this doesn't work correctly for exceptional executions.
    __gthread_once_t _M_once = __GTHREAD_ONCE_INIT;

    struct _Prepare_execution;

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };

  /// @cond undocumented
# ifdef _GLIBCXX_HAVE_TLS
  // If TLS is available use thread-local state for the type-erased callable
  // that is being run by std::call_once in the current thread.
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  // RAII type to set up state for pthread_once call.
  struct once_flag::_Prepare_execution
  {
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
        // Store address in thread-local pointer:
        __once_callable = std::__addressof(__c);
        // Trampoline function to invoke the closure via thread-local pointer:
        __once_call = [] { (*static_cast<_Callable*>(__once_callable))(); };
      }

    ~_Prepare_execution()
    {
      // PR libstdc++/82481
      __once_callable = nullptr;
      __once_call = nullptr;
    }

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };

# else
  // Without TLS use a global std::mutex and store the callable in a
  // global std::function.
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();

  // RAII type to set up state for pthread_once call.
  struct once_flag::_Prepare_execution
  {
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
        // Store the callable in the global std::function
        __once_functor = __c;
        __set_once_functor_lock_ptr(&_M_functor_lock);
      }

    ~_Prepare_execution()
    {
      if (_M_functor_lock)
        __set_once_functor_lock_ptr(nullptr);
    }

  private:
    // XXX This deadlocks if used recursively (PR 97949)
    unique_lock<mutex> _M_functor_lock{__get_once_mutex()};

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };
# endif
  /// @endcond

  // This function is passed to pthread_once by std::call_once.
  // It runs __once_call() or __once_functor().
  extern "C" void __once_proxy(void);

  /// Invoke a callable and synchronize with other calls using the same flag
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      // Closure type that runs the function
      auto __callable = [&] {
        std::__invoke(std::forward<_Callable>(__f),
                      std::forward<_Args>(__args)...);
      };

      once_flag::_Prepare_execution __exec(__callable);

      // XXX pthread_once does not reset the flag if an exception is thrown.
      if (int __e = __gthread_once(&__once._M_once, &__once_proxy))
        __throw_system_error(__e);
    }

#else // _GLIBCXX_HAS_GTHREADS

  /// Flag type used by std::call_once
  struct once_flag
  {
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // There are two different std::once_flag interfaces, abstracting four
    // different implementations.
    // The single-threaded interface uses the _M_activate() and _M_finish(bool)
    // functions, which start and finish an active execution respectively.
    // See [thread.once.callonce] in C++11 for the definition of
    // active/passive/returning/exceptional executions.
    enum _Bits : int { _Init = 0, _Active = 1, _Done = 2 };

    int _M_once = _Bits::_Init;

    // Check to see if all executions will be passive now.
    bool
    _M_passive() const noexcept;

    // Attempts to begin an active execution.
    bool _M_activate();

    // Must be called to complete an active execution.
    // The argument is true if the active execution was a returning execution,
    // false if it was an exceptional execution.
    void _M_finish(bool __returning) noexcept;

    // RAII helper to call _M_finish.
    struct _Active_execution
    {
      explicit _Active_execution(once_flag& __flag) : _M_flag(__flag) { }

      ~_Active_execution() { _M_flag._M_finish(_M_returning); }

      _Active_execution(const _Active_execution&) = delete;
      _Active_execution& operator=(const _Active_execution&) = delete;

      once_flag& _M_flag;
      bool _M_returning = false;
    };

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };

  // Inline definitions of std::once_flag members for single-threaded targets.

  inline bool
  once_flag::_M_passive() const noexcept
  { return _M_once == _Bits::_Done; }

  inline bool
  once_flag::_M_activate()
  {
    if (_M_once == _Bits::_Init) [[__likely__]]
      {
        _M_once = _Bits::_Active;
        return true;
      }
    else if (_M_passive()) // Caller should have checked this already.
      return false;
    else
      __throw_system_error(EDEADLK);
  }

  inline void
  once_flag::_M_finish(bool __returning) noexcept
  { _M_once = __returning ? _Bits::_Done : _Bits::_Init; }

  /// Invoke a callable and synchronize with other calls using the same flag
  template<typename _Callable, typename... _Args>
    inline void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      if (__once._M_passive())
        return;
      else if (__once._M_activate())
        {
          once_flag::_Active_execution __exec(__once);

          // _GLIBCXX_RESOLVE_LIB_DEFECTS
          // 2442. call_once() shouldn't DECAY_COPY()
          std::__invoke(std::forward<_Callable>(__f),
                        std::forward<_Args>(__args)...);

          // __f(__args...) did not throw
          __exec._M_returning = true;
        }
    }
#endif // _GLIBCXX_HAS_GTHREADS
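  // A minimal usage sketch for std::call_once, with a hypothetical
  // get_config() accessor: the initialisation routine runs exactly once
  // even when several threads reach it concurrently, and if it exits via
  // an exception the flag is left unset so a later call retries.
  //
  //   #include <mutex>
  //   #include <string>
  //
  //   std::once_flag config_once;
  //   std::string config;
  //
  //   const std::string& get_config()
  //   {
  //     std::call_once(config_once, [] { config = "loaded"; });
  //     return config;
  //   }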
  /// @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++11
#endif // _GLIBCXX_MUTEX