// NOTE: web-extraction artifacts (file-size banner and concatenated line
// numbers) removed from the top of this file.
// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */
  23. #ifndef _GLIBCXX_SHARED_MUTEX
  24. #define _GLIBCXX_SHARED_MUTEX 1
  25. #pragma GCC system_header
  26. #if __cplusplus >= 201402L
  27. #include <bits/c++config.h>
  28. #include <condition_variable>
  29. #include <bits/functexcept.h>
  30. namespace std _GLIBCXX_VISIBILITY(default)
  31. {
  32. _GLIBCXX_BEGIN_NAMESPACE_VERSION
  33. /**
  34. * @ingroup mutexes
  35. * @{
  36. */
  37. #ifdef _GLIBCXX_HAS_GTHREADS
  38. #if __cplusplus >= 201703L
  39. #define __cpp_lib_shared_mutex 201505
  40. class shared_mutex;
  41. #endif
  42. #define __cpp_lib_shared_timed_mutex 201402
  43. class shared_timed_mutex;
  44. #if _GLIBCXX_USE_PTHREAD_RWLOCK_T
  45. #ifdef __gthrw
  46. #define _GLIBCXX_GTHRW(name) \
  47. __gthrw(pthread_ ## name); \
  48. static inline int \
  49. __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
  50. { \
  51. if (__gthread_active_p ()) \
  52. return __gthrw_(pthread_ ## name) (__rwlock); \
  53. else \
  54. return 0; \
  55. }
  56. _GLIBCXX_GTHRW(rwlock_rdlock)
  57. _GLIBCXX_GTHRW(rwlock_tryrdlock)
  58. _GLIBCXX_GTHRW(rwlock_wrlock)
  59. _GLIBCXX_GTHRW(rwlock_trywrlock)
  60. _GLIBCXX_GTHRW(rwlock_unlock)
  61. # ifndef PTHREAD_RWLOCK_INITIALIZER
  62. _GLIBCXX_GTHRW(rwlock_destroy)
  63. __gthrw(pthread_rwlock_init);
  64. static inline int
  65. __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
  66. {
  67. if (__gthread_active_p ())
  68. return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
  69. else
  70. return 0;
  71. }
  72. # endif
  73. # if _GTHREAD_USE_MUTEX_TIMEDLOCK
  74. __gthrw(pthread_rwlock_timedrdlock);
  75. static inline int
  76. __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
  77. const timespec *__ts)
  78. {
  79. if (__gthread_active_p ())
  80. return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
  81. else
  82. return 0;
  83. }
  84. __gthrw(pthread_rwlock_timedwrlock);
  85. static inline int
  86. __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
  87. const timespec *__ts)
  88. {
  89. if (__gthread_active_p ())
  90. return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
  91. else
  92. return 0;
  93. }
  94. # endif
  95. #else
  96. static inline int
  97. __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  98. { return pthread_rwlock_rdlock (__rwlock); }
  99. static inline int
  100. __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
  101. { return pthread_rwlock_tryrdlock (__rwlock); }
  102. static inline int
  103. __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
  104. { return pthread_rwlock_wrlock (__rwlock); }
  105. static inline int
  106. __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
  107. { return pthread_rwlock_trywrlock (__rwlock); }
  108. static inline int
  109. __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
  110. { return pthread_rwlock_unlock (__rwlock); }
  111. static inline int
  112. __glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
  113. { return pthread_rwlock_destroy (__rwlock); }
  114. static inline int
  115. __glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
  116. { return pthread_rwlock_init (__rwlock, NULL); }
  117. # if _GTHREAD_USE_MUTEX_TIMEDLOCK
  118. static inline int
  119. __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
  120. const timespec *__ts)
  121. { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
  122. static inline int
  123. __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
  124. const timespec *__ts)
  125. { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
  126. # endif
  127. #endif
  128. /// A shared mutex type implemented using pthread_rwlock_t.
  129. class __shared_mutex_pthread
  130. {
  131. friend class shared_timed_mutex;
  132. #ifdef PTHREAD_RWLOCK_INITIALIZER
  133. pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;
  134. public:
  135. __shared_mutex_pthread() = default;
  136. ~__shared_mutex_pthread() = default;
  137. #else
  138. pthread_rwlock_t _M_rwlock;
  139. public:
  140. __shared_mutex_pthread()
  141. {
  142. int __ret = __glibcxx_rwlock_init(&_M_rwlock, NULL);
  143. if (__ret == ENOMEM)
  144. __throw_bad_alloc();
  145. else if (__ret == EAGAIN)
  146. __throw_system_error(int(errc::resource_unavailable_try_again));
  147. else if (__ret == EPERM)
  148. __throw_system_error(int(errc::operation_not_permitted));
  149. // Errors not handled: EBUSY, EINVAL
  150. __glibcxx_assert(__ret == 0);
  151. }
  152. ~__shared_mutex_pthread()
  153. {
  154. int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
  155. // Errors not handled: EBUSY, EINVAL
  156. __glibcxx_assert(__ret == 0);
  157. }
  158. #endif
  159. __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
  160. __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;
  161. void
  162. lock()
  163. {
  164. int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
  165. if (__ret == EDEADLK)
  166. __throw_system_error(int(errc::resource_deadlock_would_occur));
  167. // Errors not handled: EINVAL
  168. __glibcxx_assert(__ret == 0);
  169. }
  170. bool
  171. try_lock()
  172. {
  173. int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
  174. if (__ret == EBUSY) return false;
  175. // Errors not handled: EINVAL
  176. __glibcxx_assert(__ret == 0);
  177. return true;
  178. }
  179. void
  180. unlock()
  181. {
  182. int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
  183. // Errors not handled: EPERM, EBUSY, EINVAL
  184. __glibcxx_assert(__ret == 0);
  185. }
  186. // Shared ownership
  187. void
  188. lock_shared()
  189. {
  190. int __ret;
  191. // We retry if we exceeded the maximum number of read locks supported by
  192. // the POSIX implementation; this can result in busy-waiting, but this
  193. // is okay based on the current specification of forward progress
  194. // guarantees by the standard.
  195. do
  196. __ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
  197. while (__ret == EAGAIN);
  198. if (__ret == EDEADLK)
  199. __throw_system_error(int(errc::resource_deadlock_would_occur));
  200. // Errors not handled: EINVAL
  201. __glibcxx_assert(__ret == 0);
  202. }
  203. bool
  204. try_lock_shared()
  205. {
  206. int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
  207. // If the maximum number of read locks has been exceeded, we just fail
  208. // to acquire the lock. Unlike for lock(), we are not allowed to throw
  209. // an exception.
  210. if (__ret == EBUSY || __ret == EAGAIN) return false;
  211. // Errors not handled: EINVAL
  212. __glibcxx_assert(__ret == 0);
  213. return true;
  214. }
  215. void
  216. unlock_shared()
  217. {
  218. unlock();
  219. }
  220. void* native_handle() { return &_M_rwlock; }
  221. };
  222. #endif
  223. #if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  224. /// A shared mutex type implemented using std::condition_variable.
  225. class __shared_mutex_cv
  226. {
  227. friend class shared_timed_mutex;
  228. // Based on Howard Hinnant's reference implementation from N2406.
  229. // The high bit of _M_state is the write-entered flag which is set to
  230. // indicate a writer has taken the lock or is queuing to take the lock.
  231. // The remaining bits are the count of reader locks.
  232. //
  233. // To take a reader lock, block on gate1 while the write-entered flag is
  234. // set or the maximum number of reader locks is held, then increment the
  235. // reader lock count.
  236. // To release, decrement the count, then if the write-entered flag is set
  237. // and the count is zero then signal gate2 to wake a queued writer,
  238. // otherwise if the maximum number of reader locks was held signal gate1
  239. // to wake a reader.
  240. //
  241. // To take a writer lock, block on gate1 while the write-entered flag is
  242. // set, then set the write-entered flag to start queueing, then block on
  243. // gate2 while the number of reader locks is non-zero.
  244. // To release, unset the write-entered flag and signal gate1 to wake all
  245. // blocked readers and writers.
  246. //
  247. // This means that when no reader locks are held readers and writers get
  248. // equal priority. When one or more reader locks is held a writer gets
  249. // priority and no more reader locks can be taken while the writer is
  250. // queued.
  251. // Only locked when accessing _M_state or waiting on condition variables.
  252. mutex _M_mut;
  253. // Used to block while write-entered is set or reader count at maximum.
  254. condition_variable _M_gate1;
  255. // Used to block queued writers while reader count is non-zero.
  256. condition_variable _M_gate2;
  257. // The write-entered flag and reader count.
  258. unsigned _M_state;
  259. static constexpr unsigned _S_write_entered
  260. = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
  261. static constexpr unsigned _S_max_readers = ~_S_write_entered;
  262. // Test whether the write-entered flag is set. _M_mut must be locked.
  263. bool _M_write_entered() const { return _M_state & _S_write_entered; }
  264. // The number of reader locks currently held. _M_mut must be locked.
  265. unsigned _M_readers() const { return _M_state & _S_max_readers; }
  266. public:
  267. __shared_mutex_cv() : _M_state(0) {}
  268. ~__shared_mutex_cv()
  269. {
  270. __glibcxx_assert( _M_state == 0 );
  271. }
  272. __shared_mutex_cv(const __shared_mutex_cv&) = delete;
  273. __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;
  274. // Exclusive ownership
  275. void
  276. lock()
  277. {
  278. unique_lock<mutex> __lk(_M_mut);
  279. // Wait until we can set the write-entered flag.
  280. _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
  281. _M_state |= _S_write_entered;
  282. // Then wait until there are no more readers.
  283. _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
  284. }
  285. bool
  286. try_lock()
  287. {
  288. unique_lock<mutex> __lk(_M_mut, try_to_lock);
  289. if (__lk.owns_lock() && _M_state == 0)
  290. {
  291. _M_state = _S_write_entered;
  292. return true;
  293. }
  294. return false;
  295. }
  296. void
  297. unlock()
  298. {
  299. lock_guard<mutex> __lk(_M_mut);
  300. __glibcxx_assert( _M_write_entered() );
  301. _M_state = 0;
  302. // call notify_all() while mutex is held so that another thread can't
  303. // lock and unlock the mutex then destroy *this before we make the call.
  304. _M_gate1.notify_all();
  305. }
  306. // Shared ownership
  307. void
  308. lock_shared()
  309. {
  310. unique_lock<mutex> __lk(_M_mut);
  311. _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
  312. ++_M_state;
  313. }
  314. bool
  315. try_lock_shared()
  316. {
  317. unique_lock<mutex> __lk(_M_mut, try_to_lock);
  318. if (!__lk.owns_lock())
  319. return false;
  320. if (_M_state < _S_max_readers)
  321. {
  322. ++_M_state;
  323. return true;
  324. }
  325. return false;
  326. }
  327. void
  328. unlock_shared()
  329. {
  330. lock_guard<mutex> __lk(_M_mut);
  331. __glibcxx_assert( _M_readers() > 0 );
  332. auto __prev = _M_state--;
  333. if (_M_write_entered())
  334. {
  335. // Wake the queued writer if there are no more readers.
  336. if (_M_readers() == 0)
  337. _M_gate2.notify_one();
  338. // No need to notify gate1 because we give priority to the queued
  339. // writer, and that writer will eventually notify gate1 after it
  340. // clears the write-entered flag.
  341. }
  342. else
  343. {
  344. // Wake any thread that was blocked on reader overflow.
  345. if (__prev == _S_max_readers)
  346. _M_gate1.notify_one();
  347. }
  348. }
  349. };
  350. #endif
  351. #if __cplusplus > 201402L
  352. /// The standard shared mutex type.
  353. class shared_mutex
  354. {
  355. public:
  356. shared_mutex() = default;
  357. ~shared_mutex() = default;
  358. shared_mutex(const shared_mutex&) = delete;
  359. shared_mutex& operator=(const shared_mutex&) = delete;
  360. // Exclusive ownership
  361. void lock() { _M_impl.lock(); }
  362. bool try_lock() { return _M_impl.try_lock(); }
  363. void unlock() { _M_impl.unlock(); }
  364. // Shared ownership
  365. void lock_shared() { _M_impl.lock_shared(); }
  366. bool try_lock_shared() { return _M_impl.try_lock_shared(); }
  367. void unlock_shared() { _M_impl.unlock_shared(); }
  368. #if _GLIBCXX_USE_PTHREAD_RWLOCK_T
  369. typedef void* native_handle_type;
  370. native_handle_type native_handle() { return _M_impl.native_handle(); }
  371. private:
  372. __shared_mutex_pthread _M_impl;
  373. #else
  374. private:
  375. __shared_mutex_cv _M_impl;
  376. #endif
  377. };
  378. #endif // C++17
  379. #if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  380. using __shared_timed_mutex_base = __shared_mutex_pthread;
  381. #else
  382. using __shared_timed_mutex_base = __shared_mutex_cv;
  383. #endif
  384. /// The standard shared timed mutex type.
  385. class shared_timed_mutex
  386. : private __shared_timed_mutex_base
  387. {
  388. using _Base = __shared_timed_mutex_base;
  389. // Must use the same clock as condition_variable for __shared_mutex_cv.
  390. typedef chrono::system_clock __clock_t;
  391. public:
  392. shared_timed_mutex() = default;
  393. ~shared_timed_mutex() = default;
  394. shared_timed_mutex(const shared_timed_mutex&) = delete;
  395. shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
  396. // Exclusive ownership
  397. void lock() { _Base::lock(); }
  398. bool try_lock() { return _Base::try_lock(); }
  399. void unlock() { _Base::unlock(); }
  400. template<typename _Rep, typename _Period>
  401. bool
  402. try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
  403. {
  404. return try_lock_until(__clock_t::now() + __rel_time);
  405. }
  406. // Shared ownership
  407. void lock_shared() { _Base::lock_shared(); }
  408. bool try_lock_shared() { return _Base::try_lock_shared(); }
  409. void unlock_shared() { _Base::unlock_shared(); }
  410. template<typename _Rep, typename _Period>
  411. bool
  412. try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
  413. {
  414. return try_lock_shared_until(__clock_t::now() + __rel_time);
  415. }
  416. #if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  417. // Exclusive ownership
  418. template<typename _Duration>
  419. bool
  420. try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
  421. {
  422. auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
  423. auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
  424. __gthread_time_t __ts =
  425. {
  426. static_cast<std::time_t>(__s.time_since_epoch().count()),
  427. static_cast<long>(__ns.count())
  428. };
  429. int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
  430. // On self-deadlock, we just fail to acquire the lock. Technically,
  431. // the program violated the precondition.
  432. if (__ret == ETIMEDOUT || __ret == EDEADLK)
  433. return false;
  434. // Errors not handled: EINVAL
  435. __glibcxx_assert(__ret == 0);
  436. return true;
  437. }
  438. template<typename _Clock, typename _Duration>
  439. bool
  440. try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
  441. {
  442. // DR 887 - Sync unknown clock to known clock.
  443. const typename _Clock::time_point __c_entry = _Clock::now();
  444. const __clock_t::time_point __s_entry = __clock_t::now();
  445. const auto __delta = __abs_time - __c_entry;
  446. const auto __s_atime = __s_entry + __delta;
  447. return try_lock_until(__s_atime);
  448. }
  449. // Shared ownership
  450. template<typename _Duration>
  451. bool
  452. try_lock_shared_until(const chrono::time_point<__clock_t,
  453. _Duration>& __atime)
  454. {
  455. auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
  456. auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
  457. __gthread_time_t __ts =
  458. {
  459. static_cast<std::time_t>(__s.time_since_epoch().count()),
  460. static_cast<long>(__ns.count())
  461. };
  462. int __ret;
  463. // Unlike for lock(), we are not allowed to throw an exception so if
  464. // the maximum number of read locks has been exceeded, or we would
  465. // deadlock, we just try to acquire the lock again (and will time out
  466. // eventually).
  467. // In cases where we would exceed the maximum number of read locks
  468. // throughout the whole time until the timeout, we will fail to
  469. // acquire the lock even if it would be logically free; however, this
  470. // is allowed by the standard, and we made a "strong effort"
  471. // (see C++14 30.4.1.4p26).
  472. // For cases where the implementation detects a deadlock we
  473. // intentionally block and timeout so that an early return isn't
  474. // mistaken for a spurious failure, which might help users realise
  475. // there is a deadlock.
  476. do
  477. __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
  478. while (__ret == EAGAIN || __ret == EDEADLK);
  479. if (__ret == ETIMEDOUT)
  480. return false;
  481. // Errors not handled: EINVAL
  482. __glibcxx_assert(__ret == 0);
  483. return true;
  484. }
  485. template<typename _Clock, typename _Duration>
  486. bool
  487. try_lock_shared_until(const chrono::time_point<_Clock,
  488. _Duration>& __abs_time)
  489. {
  490. // DR 887 - Sync unknown clock to known clock.
  491. const typename _Clock::time_point __c_entry = _Clock::now();
  492. const __clock_t::time_point __s_entry = __clock_t::now();
  493. const auto __delta = __abs_time - __c_entry;
  494. const auto __s_atime = __s_entry + __delta;
  495. return try_lock_shared_until(__s_atime);
  496. }
  497. #else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  498. // Exclusive ownership
  499. template<typename _Clock, typename _Duration>
  500. bool
  501. try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
  502. {
  503. unique_lock<mutex> __lk(_M_mut);
  504. if (!_M_gate1.wait_until(__lk, __abs_time,
  505. [=]{ return !_M_write_entered(); }))
  506. {
  507. return false;
  508. }
  509. _M_state |= _S_write_entered;
  510. if (!_M_gate2.wait_until(__lk, __abs_time,
  511. [=]{ return _M_readers() == 0; }))
  512. {
  513. _M_state ^= _S_write_entered;
  514. // Wake all threads blocked while the write-entered flag was set.
  515. _M_gate1.notify_all();
  516. return false;
  517. }
  518. return true;
  519. }
  520. // Shared ownership
  521. template <typename _Clock, typename _Duration>
  522. bool
  523. try_lock_shared_until(const chrono::time_point<_Clock,
  524. _Duration>& __abs_time)
  525. {
  526. unique_lock<mutex> __lk(_M_mut);
  527. if (!_M_gate1.wait_until(__lk, __abs_time,
  528. [=]{ return _M_state < _S_max_readers; }))
  529. {
  530. return false;
  531. }
  532. ++_M_state;
  533. return true;
  534. }
  535. #endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  536. };
  537. #endif // _GLIBCXX_HAS_GTHREADS
  /// shared_lock
  ///
  /// Movable, non-copyable RAII wrapper that manages *shared* ownership of
  /// a mutex: all locking member functions call the lock_shared family.
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m)
      : _M_pm(std::__addressof(__m)), _M_owns(true)
      { __m.lock_shared(); }

      // Associate with __m without locking it.
      shared_lock(mutex_type& __m, std::defer_lock_t) noexcept
      : _M_pm(std::__addressof(__m)), _M_owns(false) { }

      // Try to lock __m without blocking.
      shared_lock(mutex_type& __m, std::try_to_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }

      // Adopt a shared lock already held by the caller.
      shared_lock(mutex_type& __m, std::adopt_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
        shared_lock(mutex_type& __m,
                    const std::chrono::time_point<_Clock,
                                                  _Duration>& __abs_time)
        : _M_pm(std::__addressof(__m)),
          _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
        shared_lock(mutex_type& __m,
                    const std::chrono::duration<_Rep, _Period>& __rel_time)
        : _M_pm(std::__addressof(__m)),
          _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
        if (_M_owns)
          _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
        shared_lock(std::move(__sl)).swap(*this);
        return *this;
      }

      void
      lock()
      {
        _M_lockable();
        _M_pm->lock_shared();
        _M_owns = true;
      }

      bool
      try_lock()
      {
        _M_lockable();
        return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
        bool
        try_lock_for(const std::chrono::duration<_Rep, _Period>& __rel_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
        }

      template<typename _Clock, typename _Duration>
        bool
        try_lock_until(const std::chrono::time_point<_Clock,
                                                     _Duration>& __abs_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
        }

      void
      unlock()
      {
        if (!_M_owns)
          std::__throw_system_error(int(std::errc::resource_deadlock_would_occur));
        _M_pm->unlock_shared();
        _M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
        std::swap(_M_pm, __u._M_pm);
        std::swap(_M_owns, __u._M_owns);
      }

      // Disassociate from the mutex without unlocking it.
      mutex_type*
      release() noexcept
      {
        _M_owns = false;
        return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      // Throw if *this has no mutex or already owns a shared lock.
      void
      _M_lockable() const
      {
        if (_M_pm == nullptr)
          std::__throw_system_error(int(std::errc::operation_not_permitted));
        if (_M_owns)
          std::__throw_system_error(int(std::errc::resource_deadlock_would_occur));
      }

      mutex_type* _M_pm;
      bool       _M_owns;
    };
  645. /// Swap specialization for shared_lock
  646. template<typename _Mutex>
  647. void
  648. swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
  649. { __x.swap(__y); }
  650. // @} group mutexes
  651. _GLIBCXX_END_NAMESPACE_VERSION
  652. } // namespace
  653. #endif // C++14
  654. #endif // _GLIBCXX_SHARED_MUTEX