shared_mutex 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687
  1. // <shared_mutex> -*- C++ -*-
  2. // Copyright (C) 2013-2018 Free Software Foundation, Inc.
  3. //
  4. // This file is part of the GNU ISO C++ Library. This library is free
  5. // software; you can redistribute it and/or modify it under the
  6. // terms of the GNU General Public License as published by the
  7. // Free Software Foundation; either version 3, or (at your option)
  8. // any later version.
  9. // This library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU General Public License for more details.
  13. // Under Section 7 of GPL version 3, you are granted additional
  14. // permissions described in the GCC Runtime Library Exception, version
  15. // 3.1, as published by the Free Software Foundation.
  16. // You should have received a copy of the GNU General Public License and
  17. // a copy of the GCC Runtime Library Exception along with this program;
  18. // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
  19. // <http://www.gnu.org/licenses/>.
  20. /** @file include/shared_mutex
  21. * This is a Standard C++ Library header.
  22. */
  23. #ifndef _GLIBCXX_SHARED_MUTEX
  24. #define _GLIBCXX_SHARED_MUTEX 1
  25. #pragma GCC system_header
  26. #if __cplusplus >= 201402L
  27. #include <bits/c++config.h>
  28. #include <condition_variable>
  29. #include <bits/functexcept.h>
  30. namespace std _GLIBCXX_VISIBILITY(default)
  31. {
  32. _GLIBCXX_BEGIN_NAMESPACE_VERSION
  33. /**
  34. * @ingroup mutexes
  35. * @{
  36. */
  37. #ifdef _GLIBCXX_USE_C99_STDINT_TR1
  38. #ifdef _GLIBCXX_HAS_GTHREADS
  39. #if __cplusplus >= 201703L
  40. #define __cpp_lib_shared_mutex 201505
  41. class shared_mutex;
  42. #endif
  43. #define __cpp_lib_shared_timed_mutex 201402
  44. class shared_timed_mutex;
  45. #if _GLIBCXX_USE_PTHREAD_RWLOCK_T
  46. /// A shared mutex type implemented using pthread_rwlock_t.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    // A static initializer exists, so construction and destruction
    // are trivial.
    pthread_rwlock_t	_M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t	_M_rwlock;

  public:
    __shared_mutex_pthread()
    {
      const int __err = pthread_rwlock_init(&_M_rwlock, NULL);
      switch (__err)
	{
	case ENOMEM:
	  std::__throw_bad_alloc();
	case EAGAIN:
	  std::__throw_system_error(
	      int(std::errc::resource_unavailable_try_again));
	case EPERM:
	  std::__throw_system_error(int(std::errc::operation_not_permitted));
	}
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__err == 0);
    }

    ~__shared_mutex_pthread()
    {
      const int __err __attribute((__unused__))
	= pthread_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__err == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    // Exclusive ownership

    /// Block until the write lock is acquired.
    void
    lock()
    {
      const int __err = pthread_rwlock_wrlock(&_M_rwlock);
      if (__err == EDEADLK)
	std::__throw_system_error(
	    int(std::errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__err == 0);
    }

    /// Take the write lock only if it is immediately available.
    bool
    try_lock()
    {
      const int __err = pthread_rwlock_trywrlock(&_M_rwlock);
      if (__err == EBUSY)
	return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__err == 0);
      return true;
    }

    /// Release the write lock.
    void
    unlock()
    {
      const int __err __attribute((__unused__))
	= pthread_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__err == 0);
    }

    // Shared ownership

    /// Block until a read lock is acquired.
    void
    lock_shared()
    {
      int __err;
      // Retry while the POSIX implementation reports that the maximum
      // number of read locks is already held (EAGAIN).  This can
      // busy-wait, which is acceptable under the forward-progress
      // guarantees currently given by the standard.
      while ((__err = pthread_rwlock_rdlock(&_M_rwlock)) == EAGAIN)
	{ }
      if (__err == EDEADLK)
	std::__throw_system_error(
	    int(std::errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__err == 0);
    }

    /// Take a read lock only if it is immediately available.
    bool
    try_lock_shared()
    {
      const int __err = pthread_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded we just
      // fail to acquire the lock; unlike lock_shared() we must not
      // throw an exception here.
      if (__err == EBUSY || __err == EAGAIN)
	return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__err == 0);
      return true;
    }

    /// Release a read lock (pthread_rwlock_unlock handles both kinds).
    void
    unlock_shared()
    { unlock(); }

    void* native_handle() { return &_M_rwlock; }
  };
  140. #endif
  141. #if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  142. /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.
    //
    // _M_state encodes two things: the high bit is the "write entered"
    // flag, set while a writer owns the lock or is queued waiting to
    // take it, and the remaining bits count the readers currently
    // holding the lock.
    //
    // A reader blocks on _M_gate1 while the write-entered flag is set
    // or the reader count is saturated, then increments the count.
    // Releasing a reader lock decrements the count; if a writer is
    // queued and the count reaches zero, _M_gate2 is signalled to wake
    // it, otherwise if the count was saturated _M_gate1 is signalled
    // to admit one more reader.
    //
    // A writer blocks on _M_gate1 while the write-entered flag is set,
    // sets the flag to start queuing, then blocks on _M_gate2 until
    // the reader count drains to zero.  Releasing the write lock
    // clears the flag and signals _M_gate1 to wake all blocked readers
    // and writers.
    //
    // So readers and writers get equal priority while no reader lock
    // is held; once readers hold the lock, a queued writer takes
    // priority and no further reader locks can be taken until it is
    // done.

    // Only locked when accessing _M_state or waiting on the gates.
    std::mutex			_M_mut;
    // Blocks entry while write-entered is set or readers are at max.
    std::condition_variable	_M_gate1;
    // Blocks a queued writer while the reader count is non-zero.
    std::condition_variable	_M_gate2;
    // The write-entered flag and reader count.
    unsigned			_M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Whether a writer owns or is queued for the lock.  _M_mut must be
    // locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held.  _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    { __glibcxx_assert( _M_state == 0 ); }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      std::unique_lock<std::mutex> __guard(_M_mut);
      // First claim the write-entered flag ...
      _M_gate1.wait(__guard, [this]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // ... then wait for the remaining readers to drain away.
      _M_gate2.wait(__guard, [this]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      std::unique_lock<std::mutex> __guard(_M_mut, std::try_to_lock);
      if (!__guard.owns_lock() || _M_state != 0)
	return false;
      _M_state = _S_write_entered;
      return true;
    }

    void
    unlock()
    {
      std::lock_guard<std::mutex> __guard(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // Notify while still holding _M_mut so that no other thread can
      // lock and unlock the mutex and then destroy *this before the
      // notify_all() call is made.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      std::unique_lock<std::mutex> __guard(_M_mut);
      _M_gate1.wait(__guard, [this]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      std::unique_lock<std::mutex> __guard(_M_mut, std::try_to_lock);
      if (!__guard.owns_lock() || _M_state >= _S_max_readers)
	return false;
      ++_M_state;
      return true;
    }

    void
    unlock_shared()
    {
      std::lock_guard<std::mutex> __guard(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      const unsigned __prev = _M_state--;
      if (_M_write_entered())
	{
	  // A writer is queued: wake it once the last reader is gone.
	  // _M_gate1 need not be notified here; the queued writer will
	  // notify it after clearing the write-entered flag.
	  if (_M_readers() == 0)
	    _M_gate2.notify_one();
	}
      else if (__prev == _S_max_readers)
	{
	  // No writer queued: wake one thread blocked on reader overflow.
	  _M_gate1.notify_one();
	}
    }
  };
  268. #endif
  269. #if __cplusplus > 201402L
  270. /// The standard shared mutex type.
  271. class shared_mutex
  272. {
  273. public:
  274. shared_mutex() = default;
  275. ~shared_mutex() = default;
  276. shared_mutex(const shared_mutex&) = delete;
  277. shared_mutex& operator=(const shared_mutex&) = delete;
  278. // Exclusive ownership
  279. void lock() { _M_impl.lock(); }
  280. bool try_lock() { return _M_impl.try_lock(); }
  281. void unlock() { _M_impl.unlock(); }
  282. // Shared ownership
  283. void lock_shared() { _M_impl.lock_shared(); }
  284. bool try_lock_shared() { return _M_impl.try_lock_shared(); }
  285. void unlock_shared() { _M_impl.unlock_shared(); }
  286. #if _GLIBCXX_USE_PTHREAD_RWLOCK_T
  287. typedef void* native_handle_type;
  288. native_handle_type native_handle() { return _M_impl.native_handle(); }
  289. private:
  290. __shared_mutex_pthread _M_impl;
  291. #else
  292. private:
  293. __shared_mutex_cv _M_impl;
  294. #endif
  295. };
  296. #endif // C++17
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  // The pthread rwlock is used directly: shared_timed_mutex implements
  // its timed operations with pthread_rwlock_timedwrlock/timedrdlock.
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  // Fall back to the condition_variable-based implementation;
  // shared_timed_mutex builds the timed operations from wait_until.
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif
  302. /// The standard shared timed mutex type.
  class shared_timed_mutex
  : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
    typedef chrono::system_clock	__clock_t;

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    // Try to take the write lock, waiting no longer than __rel_time.
    // The relative timeout is converted to an absolute time point on
    // __clock_t and forwarded to try_lock_until.
    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_until(__clock_t::now() + __rel_time);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    // Try to take a read lock, waiting no longer than __rel_time.
    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
    // Timed operations implemented directly on the pthread rwlock
    // inherited from __shared_mutex_pthread (_M_rwlock).

    // Exclusive ownership

    // Try to take the write lock until the absolute __clock_t time
    // __atime, using pthread_rwlock_timedwrlock.
    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
      {
	// Split the absolute time into the seconds/nanoseconds pair
	// required by the pthreads timespec interface.
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
	// On self-deadlock, we just fail to acquire the lock.  Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

    // Overload for an arbitrary clock: convert the deadline to
    // __clock_t and forward to the overload above.
    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_until(__s_atime);
      }

    // Shared ownership

    // Try to take a read lock until the absolute __clock_t time
    // __atime, using pthread_rwlock_timedrdlock.
    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<__clock_t,
			    _Duration>& __atime)
      {
	// Split the absolute time into the seconds/nanoseconds pair
	// required by the pthreads timespec interface.
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret;
	// Unlike for lock(), we are not allowed to throw an exception so if
	// the maximum number of read locks has been exceeded, or we would
	// deadlock, we just try to acquire the lock again (and will time out
	// eventually).
	// In cases where we would exceed the maximum number of read locks
	// throughout the whole time until the timeout, we will fail to
	// acquire the lock even if it would be logically free; however, this
	// is allowed by the standard, and we made a "strong effort"
	// (see C++14 30.4.1.4p26).
	// For cases where the implementation detects a deadlock we
	// intentionally block and timeout so that an early return isn't
	// mistaken for a spurious failure, which might help users realise
	// there is a deadlock.
	do
	  __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
	while (__ret == EAGAIN || __ret == EDEADLK);
	if (__ret == ETIMEDOUT)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

    // Overload for an arbitrary clock: convert the deadline to
    // __clock_t and forward to the overload above.
    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_shared_until(__s_atime);
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
    // Timed operations built on the mutex/condition-variable protocol
    // inherited from __shared_mutex_cv (see that class for the meaning
    // of _M_state, _M_gate1 and _M_gate2).

    // Exclusive ownership

    // Try to take the write lock, giving up once __abs_time is reached.
    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	// Wait (no later than the deadline) to claim the write-entered
	// flag.
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return !_M_write_entered(); }))
	  {
	    return false;
	  }
	_M_state |= _S_write_entered;
	// Then wait (no later than the deadline) for readers to drain.
	if (!_M_gate2.wait_until(__lk, __abs_time,
				 [=]{ return _M_readers() == 0; }))
	  {
	    // Timed out: roll back the write-entered flag.
	    _M_state ^= _S_write_entered;
	    // Wake all threads blocked while the write-entered flag was set.
	    _M_gate1.notify_all();
	    return false;
	  }
	return true;
      }

    // Shared ownership

    // Try to take a read lock, giving up once __abs_time is reached.
    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	// Wait (no later than the deadline) for a reader slot.
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return _M_state < _S_max_readers; }))
	  {
	    return false;
	  }
	++_M_state;
	return true;
      }
#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
  455. #endif // _GLIBCXX_HAS_GTHREADS
  456. /// shared_lock
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      /// A lock that manages no mutex and owns nothing.
      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      /// Wrap __m and block until a shared lock on it is obtained.
      explicit
      shared_lock(mutex_type& __m)
      : _M_pm(std::addressof(__m)), _M_owns(true)
      { __m.lock_shared(); }

      /// Wrap __m without locking it.
      shared_lock(mutex_type& __m, std::defer_lock_t) noexcept
      : _M_pm(std::addressof(__m)), _M_owns(false) { }

      /// Wrap __m, taking the shared lock only if immediately available.
      shared_lock(mutex_type& __m, std::try_to_lock_t)
      : _M_pm(std::addressof(__m)), _M_owns(__m.try_lock_shared()) { }

      /// Wrap __m, which the caller has already locked in shared mode.
      shared_lock(mutex_type& __m, std::adopt_lock_t)
      : _M_pm(std::addressof(__m)), _M_owns(true) { }

      /// Wrap __m, trying to obtain the shared lock until __abs_time.
      template<typename _Clock, typename _Duration>
	shared_lock(mutex_type& __m,
		    const std::chrono::time_point<_Clock,
						  _Duration>& __abs_time)
	: _M_pm(std::addressof(__m)),
	  _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      /// Wrap __m, trying to obtain the shared lock for up to __rel_time.
      template<typename _Rep, typename _Period>
	shared_lock(mutex_type& __m,
		    const std::chrono::duration<_Rep, _Period>& __rel_time)
	: _M_pm(std::addressof(__m)),
	  _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      /// Release the shared lock, if owned.
      ~shared_lock()
      {
	if (_M_owns)
	  _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      /// Steal the state of __sl, leaving it empty.
      shared_lock(shared_lock&& __sl) noexcept
      : _M_pm(std::exchange(__sl._M_pm, nullptr)),
	_M_owns(std::exchange(__sl._M_owns, false)) { }

      /// Move-assign; any lock currently owned is released.
      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
	shared_lock(std::move(__sl)).swap(*this);
	return *this;
      }

      /// Block until the shared lock is acquired.
      void
      lock()
      {
	_M_lockable();
	_M_pm->lock_shared();
	_M_owns = true;
      }

      /// Try to acquire the shared lock without blocking.
      bool
      try_lock()
      {
	_M_lockable();
	return _M_owns = _M_pm->try_lock_shared();
      }

      /// Try to acquire the shared lock for up to __rel_time.
      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const std::chrono::duration<_Rep, _Period>& __rel_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
	}

      /// Try to acquire the shared lock until __abs_time.
      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const std::chrono::time_point<_Clock,
						     _Duration>& __abs_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
	}

      /// Release the shared lock; throws if it is not owned.
      void
      unlock()
      {
	if (!_M_owns)
	  std::__throw_system_error(
	      int(std::errc::resource_deadlock_would_occur));
	_M_pm->unlock_shared();
	_M_owns = false;
      }

      // Setters

      /// Exchange state with __u.
      void
      swap(shared_lock& __u) noexcept
      {
	std::swap(_M_pm, __u._M_pm);
	std::swap(_M_owns, __u._M_owns);
      }

      /// Disassociate from the mutex without unlocking it.
      mutex_type*
      release() noexcept
      {
	mutex_type* const __ret = _M_pm;
	_M_pm = nullptr;
	_M_owns = false;
	return __ret;
      }

      // Getters

      /// Whether this object owns a shared lock on its mutex.
      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      /// The wrapped mutex, or null.
      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      // Throw unless the state permits locking: a mutex must be
      // wrapped (else operation_not_permitted) and the lock must not
      // already be owned (else resource_deadlock_would_occur).
      void
      _M_lockable() const
      {
	if (_M_pm == nullptr)
	  std::__throw_system_error(int(std::errc::operation_not_permitted));
	if (_M_owns)
	  std::__throw_system_error(
	      int(std::errc::resource_deadlock_would_occur));
      }

      mutex_type*	_M_pm;
      bool		_M_owns;
    };
  563. /// Swap specialization for shared_lock
  564. template<typename _Mutex>
  565. void
  566. swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
  567. { __x.swap(__y); }
  568. #endif // _GLIBCXX_USE_C99_STDINT_TR1
  569. // @} group mutexes
  570. _GLIBCXX_END_NAMESPACE_VERSION
  571. } // namespace
  572. #endif // C++14
  573. #endif // _GLIBCXX_SHARED_MUTEX