// hashtable.h header -*- C++ -*-

// Copyright (C) 2007-2021 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/hashtable.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{unordered_map, unordered_set}
 */

#ifndef _HASHTABLE_H
#define _HASHTABLE_H 1

#pragma GCC system_header

#include <bits/hashtable_policy.h>
#if __cplusplus > 201402L
# include <bits/node_handle.h>
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  template<typename _Tp, typename _Hash>
    using __cache_default
      = __not_<__and_<// Do not cache for fast hasher.
                      __is_fast_hash<_Hash>,
                      // Mandatory to have erase not throwing.
                      __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
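
  // Illustrative note (editor's addition, not part of the original header):
  // a minimal sketch of how __cache_default resolves, assuming the library's
  // usual traits. std::hash<int> is a fast, noexcept hasher, so hash codes
  // are not cached; a hasher that may throw forces caching so that erase()
  // can remain non-throwing. SlowHash is a hypothetical user type.
  //
  // @code
  //   struct SlowHash // hypothetical user hasher, not noexcept
  //   { std::size_t operator()(const std::string& __s) const; };
  //
  //   static_assert(!__cache_default<int, std::hash<int>>::value,
  //                 "fast noexcept hasher: hash codes are not cached");
  //   static_assert(__cache_default<std::string, SlowHash>::value,
  //                 "potentially throwing hasher: hash codes are cached");
  // @endcode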
  /**
   *  Primary class template _Hashtable.
   *
   *  @ingroup hashtable-detail
   *
   *  @tparam _Value  CopyConstructible type.
   *
   *  @tparam _Key  CopyConstructible type.
   *
   *  @tparam _Alloc  An allocator type
   *  ([lib.allocator.requirements]) whose _Alloc::value_type is
   *  _Value. As a conforming extension, we allow for
   *  _Alloc::value_type != _Value.
   *
   *  @tparam _ExtractKey  Function object that takes an object of type
   *  _Value and returns a value of type _Key.
   *
   *  @tparam _Equal  Function object that takes two objects of type
   *  _Key and returns a bool-like value that is true if the two objects
   *  are considered equal.
   *
   *  @tparam _Hash  The hash function. A unary function object with
   *  argument type _Key and result type size_t. Return values should
   *  be distributed over the entire range [0, numeric_limits<size_t>::max()].
   *
   *  @tparam _RangeHash  The range-hashing function (in the terminology of
   *  Tavori and Dreizin). A binary function object whose argument
   *  types and result type are all size_t. Given arguments r and N,
   *  the return value is in the range [0, N).
   *
   *  @tparam _Unused  Not used.
   *
   *  @tparam _RehashPolicy  Policy class with three members, all of
   *  which govern the bucket count. _M_next_bkt(n) returns a bucket
   *  count no smaller than n. _M_bkt_for_elements(n) returns a
   *  bucket count appropriate for an element count of n.
   *  _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
   *  current bucket count is n_bkt and the current element count is
   *  n_elt, we need to increase the bucket count for n_ins insertions.
   *  If so, it returns make_pair(true, n), where n is the new bucket count;
   *  if not, it returns make_pair(false, <anything>).
   *
   *  @tparam _Traits  Compile-time class with three boolean
   *  std::integral_constant members: __cache_hash_code, __constant_iterators,
   *  __unique_keys.
   *
   *  Each _Hashtable data structure has:
   *
   *  - _Bucket[]       _M_buckets
   *  - _Hash_node_base _M_before_begin
   *  - size_type       _M_bucket_count
   *  - size_type       _M_element_count
   *
   *  with _Bucket being _Hash_node_base* and _Hash_node containing:
   *
   *  - _Hash_node*  _M_next
   *  - Tp           _M_value
   *  - size_t       _M_hash_code if cache_hash_code is true
   *
   *  In terms of Standard containers the hashtable is like the aggregation of:
   *
   *  - std::forward_list<_Node> containing the elements
   *  - std::vector<std::forward_list<_Node>::iterator> representing the buckets
   *
   *  The non-empty buckets contain the node before the first node in the
   *  bucket. This design makes it possible to implement something like a
   *  std::forward_list::insert_after on container insertion and
   *  std::forward_list::erase_after on container erase
   *  calls. _M_before_begin is equivalent to
   *  std::forward_list::before_begin. Empty buckets contain
   *  nullptr. Note that one of the non-empty buckets contains
   *  &_M_before_begin, which is not a dereferenceable node, so the
   *  node pointer in a bucket shall never be dereferenced; only its
   *  next node can be.
   *
   *  Walking through a bucket's nodes requires a check on the hash code to
   *  see if each node is still in the bucket. Such a design assumes a
   *  quite efficient hash functor and is one of the reasons it is
   *  highly advisable to set __cache_hash_code to true.
   *
   *  The container iterators are simply built from nodes. This way
   *  incrementing the iterator is perfectly efficient independent of
   *  how many empty buckets there are in the container.
   *
   *  On insert we compute the element's hash code and use it to find the
   *  bucket index. If the element must be inserted in an empty bucket
   *  we add it at the beginning of the singly linked list and make the
   *  bucket point to _M_before_begin. The bucket that used to point to
   *  _M_before_begin, if any, is updated to point to its new before
   *  begin node.
   *
   *  On erase, the simple iterator design requires using the hash
   *  functor to get the index of the bucket to update. For this
   *  reason, when __cache_hash_code is set to false the hash functor must
   *  not throw, and this is enforced by a static assertion.
   *
   *  Functionality is implemented by decomposition into base classes,
   *  where the derived _Hashtable class is used in the _Map_base,
   *  _Insert, _Rehash_base, and _Equality base classes to access the
   *  "this" pointer. _Hashtable_base is used in the base classes as a
   *  non-recursive, fully-completed type so that detailed nested type
   *  information, such as the iterator type and node type, can be
   *  used. This is similar to the "Curiously Recurring Template
   *  Pattern" (CRTP) technique, but uses a reconstructed, not
   *  explicitly passed, template pattern.
   *
   *  Base class templates are:
   *  - __detail::_Hashtable_base
   *  - __detail::_Map_base
   *  - __detail::_Insert
   *  - __detail::_Rehash_base
   *  - __detail::_Equality
   */
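  // Illustrative note (editor's addition, not part of the original header):
  // a self-contained sketch of the bucket layout described above, using only
  // public containers. All names are hypothetical; the real implementation
  // stores raw node pointers and before-begin nodes, not iterators.
  //
  // @code
  //   #include <forward_list>
  //   #include <functional>
  //   #include <vector>
  //
  //   struct TinyHashSet
  //   {
  //     // All elements live in one singly linked list; each non-empty bucket
  //     // remembers the position *before* its first element so insertion and
  //     // erasure can reuse insert_after/erase_after.
  //     std::forward_list<int> elems;
  //     std::vector<std::forward_list<int>::iterator> bkts;
  //
  //     explicit TinyHashSet(std::size_t n) : bkts(n, elems.end()) { }
  //
  //     void insert(int v)
  //     {
  //       std::size_t b = std::hash<int>{}(v) % bkts.size();
  //       if (bkts[b] == elems.end())
  //         {
  //           // Empty bucket: prepend and make this bucket own before_begin().
  //           // A full implementation would also update the bucket that
  //           // previously owned before_begin(), exactly as described above
  //           // for _M_before_begin.
  //           elems.push_front(v);
  //           bkts[b] = elems.before_begin();
  //         }
  //       else
  //         elems.insert_after(bkts[b], v);
  //     }
  //   };
  // @endcode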
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    class _Hashtable
    : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
                                       _Hash, _RangeHash, _Unused, _Traits>,
      public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                                 _Hash, _RangeHash, _Unused,
                                 _RehashPolicy, _Traits>,
      public __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                               _Hash, _RangeHash, _Unused,
                               _RehashPolicy, _Traits>,
      public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                                    _Hash, _RangeHash, _Unused,
                                    _RehashPolicy, _Traits>,
      public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                                 _Hash, _RangeHash, _Unused,
                                 _RehashPolicy, _Traits>,
      private __detail::_Hashtable_alloc<
        __alloc_rebind<_Alloc,
                       __detail::_Hash_node<_Value,
                                            _Traits::__hash_cached::value>>>
    {
      static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
          "unordered container must have a non-const, non-volatile value_type");
#if __cplusplus > 201703L || defined __STRICT_ANSI__
      static_assert(is_same<typename _Alloc::value_type, _Value>{},
          "unordered container must have the same value_type as its allocator");
#endif

      using __traits_type = _Traits;
      using __hash_cached = typename __traits_type::__hash_cached;
      using __constant_iterators = typename __traits_type::__constant_iterators;
      using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
      using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;

      using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;

      using __node_value_type =
        __detail::_Hash_node_value<_Value, __hash_cached::value>;
      using __node_ptr = typename __hashtable_alloc::__node_ptr;
      using __value_alloc_traits =
        typename __hashtable_alloc::__value_alloc_traits;
      using __node_alloc_traits =
        typename __hashtable_alloc::__node_alloc_traits;
      using __node_base = typename __hashtable_alloc::__node_base;
      using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
      using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;

      using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
                                              _Equal, _Hash,
                                              _RangeHash, _Unused,
                                              _RehashPolicy, _Traits>;

    public:
      typedef _Key key_type;
      typedef _Value value_type;
      typedef _Alloc allocator_type;
      typedef _Equal key_equal;

      // mapped_type, if present, comes from _Map_base.
      // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
      typedef typename __value_alloc_traits::pointer pointer;
      typedef typename __value_alloc_traits::const_pointer const_pointer;
      typedef value_type& reference;
      typedef const value_type& const_reference;

      using iterator = typename __insert_base::iterator;
      using const_iterator = typename __insert_base::const_iterator;

      using local_iterator = __detail::_Local_iterator<key_type, _Value,
                                        _ExtractKey, _Hash, _RangeHash, _Unused,
                                        __constant_iterators::value,
                                        __hash_cached::value>;
      using const_local_iterator = __detail::_Local_const_iterator<
                                        key_type, _Value,
                                        _ExtractKey, _Hash, _RangeHash, _Unused,
                                        __constant_iterators::value,
                                        __hash_cached::value>;

    private:
      using __rehash_type = _RehashPolicy;
      using __rehash_state = typename __rehash_type::_State;

      using __unique_keys = typename __traits_type::__unique_keys;

      using __hashtable_base = __detail::
        _Hashtable_base<_Key, _Value, _ExtractKey,
                        _Equal, _Hash, _RangeHash, _Unused, _Traits>;

      using __hash_code_base = typename __hashtable_base::__hash_code_base;
      using __hash_code = typename __hashtable_base::__hash_code;
      using __ireturn_type = typename __insert_base::__ireturn_type;

      using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
                                             _Equal, _Hash, _RangeHash, _Unused,
                                             _RehashPolicy, _Traits>;

      using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
                                                   _ExtractKey, _Equal,
                                                   _Hash, _RangeHash, _Unused,
                                                   _RehashPolicy, _Traits>;

      using __eq_base = __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey,
                                            _Equal, _Hash, _RangeHash, _Unused,
                                            _RehashPolicy, _Traits>;

      using __reuse_or_alloc_node_gen_t =
        __detail::_ReuseOrAllocNode<__node_alloc_type>;
      using __alloc_node_gen_t =
        __detail::_AllocNode<__node_alloc_type>;

      // Simple RAII type for managing a node containing an element
      struct _Scoped_node
      {
        // Take ownership of a node with a constructed element.
        _Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
        : _M_h(__h), _M_node(__n) { }

        // Allocate a node and construct an element within it.
        template<typename... _Args>
          _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
          : _M_h(__h),
            _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
          { }

        // Destroy element and deallocate node.
        ~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };

        _Scoped_node(const _Scoped_node&) = delete;
        _Scoped_node& operator=(const _Scoped_node&) = delete;

        __hashtable_alloc* _M_h;
        __node_ptr _M_node;
      };
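
      // Illustrative note (editor's addition, not part of the original
      // header): the intended usage pattern of _Scoped_node, sketched from
      // how the emplace/insert helpers use it. The guard frees the node on
      // an exception; once the hashtable has taken ownership, the guard is
      // disarmed by nulling _M_node. Names beyond _Scoped_node members are
      // hypothetical.
      //
      // @code
      //   _Scoped_node __node { this, std::forward<_Args>(__args)... };
      //   // ... compute hash code and bucket, check for an equivalent key ...
      //   auto __it = _M_insert_unique_node(__bkt, __code, __node._M_node);
      //   __node._M_node = nullptr; // ownership transferred, guard won't free
      //   return { __it, true };
      // @endcode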
      template<typename _Ht>
        static constexpr
        typename conditional<std::is_lvalue_reference<_Ht>::value,
                             const value_type&, value_type&&>::type
        __fwd_value_for(value_type& __val) noexcept
        { return std::move(__val); }

      // Compile-time diagnostics.

      // _Hash_code_base has everything protected, so use this derived type to
      // access it.
      struct __hash_code_base_access : __hash_code_base
      { using __hash_code_base::_M_bucket_index; };

      // Getting a bucket index from a node shall not throw because it is used
      // in methods (erase, swap...) that shall not throw.
      static_assert(noexcept(declval<const __hash_code_base_access&>()
                             ._M_bucket_index(declval<const __node_value_type&>(),
                                              (std::size_t)0)),
                    "Cache the hash code or qualify your functors involved"
                    " in hash code and bucket index computation with noexcept");

      // To get the bucket index we need _RangeHash not to throw.
      static_assert(is_nothrow_default_constructible<_RangeHash>::value,
                    "Functor used to map hash code to bucket index"
                    " must be nothrow default constructible");
      static_assert(noexcept(
        std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
                    "Functor used to map hash code to bucket index must be"
                    " noexcept");

      // To compute the bucket index we also need _ExtractKey not to throw.
      static_assert(is_nothrow_default_constructible<_ExtractKey>::value,
                    "_ExtractKey must be nothrow default constructible");
      static_assert(noexcept(
        std::declval<const _ExtractKey&>()(std::declval<_Value>())),
                    "_ExtractKey functor must be noexcept invocable");

      template<typename _Keya, typename _Valuea, typename _Alloca,
               typename _ExtractKeya, typename _Equala,
               typename _Hasha, typename _RangeHasha, typename _Unuseda,
               typename _RehashPolicya, typename _Traitsa,
               bool _Unique_keysa>
        friend struct __detail::_Map_base;

      template<typename _Keya, typename _Valuea, typename _Alloca,
               typename _ExtractKeya, typename _Equala,
               typename _Hasha, typename _RangeHasha, typename _Unuseda,
               typename _RehashPolicya, typename _Traitsa>
        friend struct __detail::_Insert_base;

      template<typename _Keya, typename _Valuea, typename _Alloca,
               typename _ExtractKeya, typename _Equala,
               typename _Hasha, typename _RangeHasha, typename _Unuseda,
               typename _RehashPolicya, typename _Traitsa,
               bool _Constant_iteratorsa>
        friend struct __detail::_Insert;

      template<typename _Keya, typename _Valuea, typename _Alloca,
               typename _ExtractKeya, typename _Equala,
               typename _Hasha, typename _RangeHasha, typename _Unuseda,
               typename _RehashPolicya, typename _Traitsa,
               bool _Unique_keysa>
        friend struct __detail::_Equality;

    public:
      using size_type = typename __hashtable_base::size_type;
      using difference_type = typename __hashtable_base::difference_type;

#if __cplusplus > 201402L
      using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
      using insert_return_type = _Node_insert_return<iterator, node_type>;
#endif

    private:
      __buckets_ptr   _M_buckets       = &_M_single_bucket;
      size_type       _M_bucket_count  = 1;
      __node_base     _M_before_begin;
      size_type       _M_element_count = 0;
      _RehashPolicy   _M_rehash_policy;

      // A single bucket, used when the container needs only one bucket.
      // Especially useful for move semantics: the moved-from hashtable can be
      // left with a single bucket that was never allocated, so those
      // operations can be noexcept qualified.
      // Note that we can't leave the hashtable with zero buckets without
      // adding numerous checks in the code to avoid a zero modulus.
      __node_base_ptr _M_single_bucket = nullptr;
      void
      _M_update_bbegin()
      {
        if (_M_begin())
          _M_buckets[_M_bucket_index(*_M_begin())] = &_M_before_begin;
      }

      void
      _M_update_bbegin(__node_ptr __n)
      {
        _M_before_begin._M_nxt = __n;
        _M_update_bbegin();
      }

      bool
      _M_uses_single_bucket(__buckets_ptr __bkts) const
      { return __builtin_expect(__bkts == &_M_single_bucket, false); }

      bool
      _M_uses_single_bucket() const
      { return _M_uses_single_bucket(_M_buckets); }

      __hashtable_alloc&
      _M_base_alloc() { return *this; }

      __buckets_ptr
      _M_allocate_buckets(size_type __bkt_count)
      {
        if (__builtin_expect(__bkt_count == 1, false))
          {
            _M_single_bucket = nullptr;
            return &_M_single_bucket;
          }

        return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
      }

      void
      _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
      {
        if (_M_uses_single_bucket(__bkts))
          return;

        __hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
      }

      void
      _M_deallocate_buckets()
      { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }

      // Gets bucket begin, deals with the fact that non-empty buckets contain
      // their before begin node.
      __node_ptr
      _M_bucket_begin(size_type __bkt) const;

      __node_ptr
      _M_begin() const
      { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }

      // Assign *this using another _Hashtable instance. Whether elements
      // are copied or moved depends on the _Ht reference.
      template<typename _Ht>
        void
        _M_assign_elements(_Ht&&);

      template<typename _Ht, typename _NodeGenerator>
        void
        _M_assign(_Ht&&, const _NodeGenerator&);

      void
      _M_move_assign(_Hashtable&&, true_type);
      void
      _M_move_assign(_Hashtable&&, false_type);

      void
      _M_reset() noexcept;

      _Hashtable(const _Hash& __h, const _Equal& __eq,
                 const allocator_type& __a)
      : __hashtable_base(__h, __eq),
        __hashtable_alloc(__node_alloc_type(__a))
      { }

      template<bool _No_realloc = true>
        static constexpr bool
        _S_nothrow_move()
        {
#if __cplusplus <= 201402L
          return __and_<__bool_constant<_No_realloc>,
                        is_nothrow_copy_constructible<_Hash>,
                        is_nothrow_copy_constructible<_Equal>>::value;
#else
          if constexpr (_No_realloc)
            if constexpr (is_nothrow_copy_constructible<_Hash>())
              return is_nothrow_copy_constructible<_Equal>();
          return false;
#endif
        }

      _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
                 true_type /* alloc always equal */)
        noexcept(_S_nothrow_move());

      _Hashtable(_Hashtable&&, __node_alloc_type&&,
                 false_type /* alloc always equal */);

      template<typename _InputIterator>
        _Hashtable(_InputIterator __first, _InputIterator __last,
                   size_type __bkt_count_hint,
                   const _Hash&, const _Equal&, const allocator_type&,
                   true_type __uks);

      template<typename _InputIterator>
        _Hashtable(_InputIterator __first, _InputIterator __last,
                   size_type __bkt_count_hint,
                   const _Hash&, const _Equal&, const allocator_type&,
                   false_type __uks);

    public:
      // Constructor, destructor, assignment, swap
      _Hashtable() = default;

      _Hashtable(const _Hashtable&);

      _Hashtable(const _Hashtable&, const allocator_type&);

      explicit
      _Hashtable(size_type __bkt_count_hint,
                 const _Hash& __hf = _Hash(),
                 const key_equal& __eql = key_equal(),
                 const allocator_type& __a = allocator_type());

      // Use delegating constructors.
      _Hashtable(_Hashtable&& __ht)
        noexcept(_S_nothrow_move())
      : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
                   true_type{})
      { }

      _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
        noexcept(_S_nothrow_move<__node_alloc_traits::_S_always_equal()>())
      : _Hashtable(std::move(__ht), __node_alloc_type(__a),
                   typename __node_alloc_traits::is_always_equal{})
      { }

      explicit
      _Hashtable(const allocator_type& __a)
      : __hashtable_alloc(__node_alloc_type(__a))
      { }

      template<typename _InputIterator>
        _Hashtable(_InputIterator __f, _InputIterator __l,
                   size_type __bkt_count_hint = 0,
                   const _Hash& __hf = _Hash(),
                   const key_equal& __eql = key_equal(),
                   const allocator_type& __a = allocator_type())
        : _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
                     __unique_keys{})
        { }

      _Hashtable(initializer_list<value_type> __l,
                 size_type __bkt_count_hint = 0,
                 const _Hash& __hf = _Hash(),
                 const key_equal& __eql = key_equal(),
                 const allocator_type& __a = allocator_type())
      : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
                   __hf, __eql, __a, __unique_keys{})
      { }

      _Hashtable&
      operator=(const _Hashtable& __ht);

      _Hashtable&
      operator=(_Hashtable&& __ht)
      noexcept(__node_alloc_traits::_S_nothrow_move()
               && is_nothrow_move_assignable<_Hash>::value
               && is_nothrow_move_assignable<_Equal>::value)
      {
        constexpr bool __move_storage =
          __node_alloc_traits::_S_propagate_on_move_assign()
          || __node_alloc_traits::_S_always_equal();
        _M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
        return *this;
      }

      _Hashtable&
      operator=(initializer_list<value_type> __l)
      {
        __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
        _M_before_begin._M_nxt = nullptr;
        clear();

        // We consider that all elements of __l are going to be inserted.
        auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());

        // Do not shrink to keep potential user reservation.
        if (_M_bucket_count < __l_bkt_count)
          rehash(__l_bkt_count);

        this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys{});
        return *this;
      }
      ~_Hashtable() noexcept;

      void
      swap(_Hashtable&)
        noexcept(__and_<__is_nothrow_swappable<_Hash>,
                        __is_nothrow_swappable<_Equal>>::value);

      // Basic container operations
      iterator
      begin() noexcept
      { return iterator(_M_begin()); }

      const_iterator
      begin() const noexcept
      { return const_iterator(_M_begin()); }

      iterator
      end() noexcept
      { return iterator(nullptr); }

      const_iterator
      end() const noexcept
      { return const_iterator(nullptr); }

      const_iterator
      cbegin() const noexcept
      { return const_iterator(_M_begin()); }

      const_iterator
      cend() const noexcept
      { return const_iterator(nullptr); }

      size_type
      size() const noexcept
      { return _M_element_count; }

      _GLIBCXX_NODISCARD bool
      empty() const noexcept
      { return size() == 0; }

      allocator_type
      get_allocator() const noexcept
      { return allocator_type(this->_M_node_allocator()); }

      size_type
      max_size() const noexcept
      { return __node_alloc_traits::max_size(this->_M_node_allocator()); }

      // Observers
      key_equal
      key_eq() const
      { return this->_M_eq(); }

      // hash_function, if present, comes from _Hash_code_base.

      // Bucket operations
      size_type
      bucket_count() const noexcept
      { return _M_bucket_count; }

      size_type
      max_bucket_count() const noexcept
      { return max_size(); }

      size_type
      bucket_size(size_type __bkt) const
      { return std::distance(begin(__bkt), end(__bkt)); }

      size_type
      bucket(const key_type& __k) const
      { return _M_bucket_index(this->_M_hash_code(__k)); }

      local_iterator
      begin(size_type __bkt)
      {
        return local_iterator(*this, _M_bucket_begin(__bkt),
                              __bkt, _M_bucket_count);
      }

      local_iterator
      end(size_type __bkt)
      { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }

      const_local_iterator
      begin(size_type __bkt) const
      {
        return const_local_iterator(*this, _M_bucket_begin(__bkt),
                                    __bkt, _M_bucket_count);
      }

      const_local_iterator
      end(size_type __bkt) const
      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }

      // DR 691.
      const_local_iterator
      cbegin(size_type __bkt) const
      {
        return const_local_iterator(*this, _M_bucket_begin(__bkt),
                                    __bkt, _M_bucket_count);
      }

      const_local_iterator
      cend(size_type __bkt) const
      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }

      float
      load_factor() const noexcept
      {
        return static_cast<float>(size()) / static_cast<float>(bucket_count());
      }

      // max_load_factor, if present, comes from _Rehash_base.

      // Generalization of max_load_factor. Extension, not found in
      // TR1. Only useful if _RehashPolicy is something other than
      // the default.
      const _RehashPolicy&
      __rehash_policy() const
      { return _M_rehash_policy; }

      void
      __rehash_policy(const _RehashPolicy& __pol)
      { _M_rehash_policy = __pol; }

      // Lookup.
      iterator
      find(const key_type& __k);

      const_iterator
      find(const key_type& __k) const;

      size_type
      count(const key_type& __k) const;

      std::pair<iterator, iterator>
      equal_range(const key_type& __k);

      std::pair<const_iterator, const_iterator>
      equal_range(const key_type& __k) const;

#if __cplusplus > 201702L
      template<typename _Kt,
               typename = __has_is_transparent_t<_Hash, _Kt>,
               typename = __has_is_transparent_t<_Equal, _Kt>>
        iterator
        _M_find_tr(const _Kt& __k);

      template<typename _Kt,
               typename = __has_is_transparent_t<_Hash, _Kt>,
               typename = __has_is_transparent_t<_Equal, _Kt>>
        const_iterator
        _M_find_tr(const _Kt& __k) const;

      template<typename _Kt,
               typename = __has_is_transparent_t<_Hash, _Kt>,
               typename = __has_is_transparent_t<_Equal, _Kt>>
        size_type
        _M_count_tr(const _Kt& __k) const;

      template<typename _Kt,
               typename = __has_is_transparent_t<_Hash, _Kt>,
               typename = __has_is_transparent_t<_Equal, _Kt>>
        pair<iterator, iterator>
        _M_equal_range_tr(const _Kt& __k);

      template<typename _Kt,
               typename = __has_is_transparent_t<_Hash, _Kt>,
               typename = __has_is_transparent_t<_Equal, _Kt>>
        pair<const_iterator, const_iterator>
        _M_equal_range_tr(const _Kt& __k) const;
#endif
    private:
      // Bucket index computation helpers.
      size_type
      _M_bucket_index(const __node_value_type& __n) const noexcept
      { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }

      size_type
      _M_bucket_index(__hash_code __c) const
      { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }

      // Find and insert helper functions and types.

      // Find the node before the one matching the criteria.
      __node_base_ptr
      _M_find_before_node(size_type, const key_type&, __hash_code) const;

      template<typename _Kt>
        __node_base_ptr
        _M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;

      __node_ptr
      _M_find_node(size_type __bkt, const key_type& __key,
                   __hash_code __c) const
      {
        __node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
        if (__before_n)
          return static_cast<__node_ptr>(__before_n->_M_nxt);
        return nullptr;
      }

      template<typename _Kt>
        __node_ptr
        _M_find_node_tr(size_type __bkt, const _Kt& __key,
                        __hash_code __c) const
        {
          auto __before_n = _M_find_before_node_tr(__bkt, __key, __c);
          if (__before_n)
            return static_cast<__node_ptr>(__before_n->_M_nxt);
          return nullptr;
        }

      // Insert a node at the beginning of a bucket.
      void
      _M_insert_bucket_begin(size_type, __node_ptr);

      // Remove the bucket's first node.
      void
      _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
                             size_type __next_bkt);

      // Get the node before __n in bucket __bkt.
      __node_base_ptr
      _M_get_previous_node(size_type __bkt, __node_ptr __n);

      // Insert node __n with hash code __code, in bucket __bkt if no
      // rehash (assumes no element with the same key is already present).
      // Takes ownership of __n if insertion succeeds, throws otherwise.
      iterator
      _M_insert_unique_node(size_type __bkt, __hash_code,
                            __node_ptr __n, size_type __n_elt = 1);

      // Insert node __n with key __k and hash code __code.
      // Takes ownership of __n if insertion succeeds, throws otherwise.
      iterator
      _M_insert_multi_node(__node_ptr __hint,
                           __hash_code __code, __node_ptr __n);

      template<typename... _Args>
        std::pair<iterator, bool>
        _M_emplace(true_type __uks, _Args&&... __args);

      template<typename... _Args>
        iterator
        _M_emplace(false_type __uks, _Args&&... __args)
        { return _M_emplace(cend(), __uks, std::forward<_Args>(__args)...); }

      // Emplace with hint, useless when keys are unique.
      template<typename... _Args>
        iterator
        _M_emplace(const_iterator, true_type __uks, _Args&&... __args)
        { return _M_emplace(__uks, std::forward<_Args>(__args)...).first; }

      template<typename... _Args>
        iterator
        _M_emplace(const_iterator, false_type __uks, _Args&&... __args);

      template<typename _Arg, typename _NodeGenerator>
        std::pair<iterator, bool>
        _M_insert(_Arg&&, const _NodeGenerator&, true_type __uks);

      template<typename _Arg, typename _NodeGenerator>
        iterator
        _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
                  false_type __uks)
        {
          return _M_insert(cend(), std::forward<_Arg>(__arg), __node_gen,
                           __uks);
        }

      // Insert with hint, not used when keys are unique.
      template<typename _Arg, typename _NodeGenerator>
        iterator
        _M_insert(const_iterator, _Arg&& __arg,
                  const _NodeGenerator& __node_gen, true_type __uks)
        {
          return
            _M_insert(std::forward<_Arg>(__arg), __node_gen, __uks).first;
        }

      // Insert with hint when keys are not unique.
      template<typename _Arg, typename _NodeGenerator>
        iterator
        _M_insert(const_iterator, _Arg&&,
                  const _NodeGenerator&, false_type __uks);

      size_type
      _M_erase(true_type __uks, const key_type&);
      size_type
      _M_erase(false_type __uks, const key_type&);

      iterator
      _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
    public:
      // Emplace
      template<typename... _Args>
        __ireturn_type
        emplace(_Args&&... __args)
        { return _M_emplace(__unique_keys{}, std::forward<_Args>(__args)...); }

      template<typename... _Args>
        iterator
        emplace_hint(const_iterator __hint, _Args&&... __args)
        {
          return _M_emplace(__hint, __unique_keys{},
                            std::forward<_Args>(__args)...);
        }

      // Insert member functions via inheritance.

      // Erase
      iterator
      erase(const_iterator);

      // LWG 2059.
      iterator
      erase(iterator __it)
      { return erase(const_iterator(__it)); }

      size_type
      erase(const key_type& __k)
      { return _M_erase(__unique_keys{}, __k); }

      iterator
      erase(const_iterator, const_iterator);

      void
      clear() noexcept;

      // Set number of buckets keeping it appropriate for container's number
      // of elements.
      void rehash(size_type __bkt_count);

      // DR 1189.
      // reserve, if present, comes from _Rehash_base.

#if __cplusplus > 201402L
      /// Re-insert an extracted node into a container with unique keys.
      insert_return_type
      _M_reinsert_node(node_type&& __nh)
      {
        insert_return_type __ret;
        if (__nh.empty())
          __ret.position = end();
        else
          {
            __glibcxx_assert(get_allocator() == __nh.get_allocator());

            const key_type& __k = __nh._M_key();
            __hash_code __code = this->_M_hash_code(__k);
            size_type __bkt = _M_bucket_index(__code);
            if (__node_ptr __n = _M_find_node(__bkt, __k, __code))
              {
                __ret.node = std::move(__nh);
                __ret.position = iterator(__n);
                __ret.inserted = false;
              }
            else
              {
                __ret.position
                  = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
                __nh._M_ptr = nullptr;
                __ret.inserted = true;
              }
          }
        return __ret;
      }

      /// Re-insert an extracted node into a container with equivalent keys.
      iterator
      _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
      {
        if (__nh.empty())
          return end();

        __glibcxx_assert(get_allocator() == __nh.get_allocator());

        const key_type& __k = __nh._M_key();
        auto __code = this->_M_hash_code(__k);
        auto __ret
          = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
        __nh._M_ptr = nullptr;
        return __ret;
      }

    private:
      node_type
      _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
      {
        __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
        if (__prev_n == _M_buckets[__bkt])
          _M_remove_bucket_begin(__bkt, __n->_M_next(),
             __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
        else if (__n->_M_nxt)
          {
            size_type __next_bkt = _M_bucket_index(*__n->_M_next());
            if (__next_bkt != __bkt)
              _M_buckets[__next_bkt] = __prev_n;
          }

        __prev_n->_M_nxt = __n->_M_nxt;
        __n->_M_nxt = nullptr;
        --_M_element_count;
        return { __n, this->_M_node_allocator() };
      }

    public:
      // Extract a node.
      node_type
      extract(const_iterator __pos)
      {
        size_t __bkt = _M_bucket_index(*__pos._M_cur);
        return _M_extract_node(__bkt,
                               _M_get_previous_node(__bkt, __pos._M_cur));
      }

      /// Extract a node.
      node_type
      extract(const _Key& __k)
      {
        node_type __nh;
        __hash_code __code = this->_M_hash_code(__k);
        std::size_t __bkt = _M_bucket_index(__code);
        if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
          __nh = _M_extract_node(__bkt, __prev_node);
        return __nh;
      }

      /// Merge from a compatible container into one with unique keys.
      template<typename _Compatible_Hashtable>
        void
        _M_merge_unique(_Compatible_Hashtable& __src) noexcept
        {
          static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
              node_type>, "Node types are compatible");
          __glibcxx_assert(get_allocator() == __src.get_allocator());

          auto __n_elt = __src.size();
          for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
            {
              auto __pos = __i++;
              const key_type& __k = _ExtractKey{}(*__pos);
              __hash_code __code = this->_M_hash_code(__k);
              size_type __bkt = _M_bucket_index(__code);
              if (_M_find_node(__bkt, __k, __code) == nullptr)
                {
                  auto __nh = __src.extract(__pos);
                  _M_insert_unique_node(__bkt, __code, __nh._M_ptr, __n_elt);
                  __nh._M_ptr = nullptr;
                  __n_elt = 1;
                }
              else if (__n_elt != 1)
                --__n_elt;
            }
        }

      /// Merge from a compatible container into one with equivalent keys.
      template<typename _Compatible_Hashtable>
        void
        _M_merge_multi(_Compatible_Hashtable& __src) noexcept
        {
          static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
              node_type>, "Node types are compatible");
          __glibcxx_assert(get_allocator() == __src.get_allocator());

          this->reserve(size() + __src.size());
          for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
            _M_reinsert_node_multi(cend(), __src.extract(__i++));
        }
#endif // C++17

    private:
      // Helper rehash method used when keys are unique.
      void _M_rehash_aux(size_type __bkt_count, true_type __uks);

      // Helper rehash method used when keys can be non-unique.
      void _M_rehash_aux(size_type __bkt_count, false_type __uks);

      // Unconditionally change size of bucket array to n, restore
      // hash policy state to __state on exception.
      void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
    };
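
  // Illustrative note (editor's addition, not part of the original header):
  // a minimal sketch of a user-provided _RehashPolicy matching the three
  // members described in the class comment above, plus the _State /
  // _M_state() / _M_reset() hooks that the out-of-line members below rely
  // on. Power-of-two growth at load factor 1.0 is only an example; the
  // library's default policy is not this one.
  //
  // @code
  //   struct _Pow2_rehash_policy
  //   {
  //     using _State = std::size_t;
  //
  //     // Smallest power of two not smaller than __n.
  //     std::size_t
  //     _M_next_bkt(std::size_t __n) const
  //     {
  //       std::size_t __bkt = 1;
  //       while (__bkt < __n)
  //         __bkt <<= 1;
  //       return __bkt;
  //     }
  //
  //     // Bucket count suitable for __n elements at load factor 1.0.
  //     std::size_t
  //     _M_bkt_for_elements(std::size_t __n) const
  //     { return _M_next_bkt(__n); }
  //
  //     // Grow when the current buckets cannot absorb __n_ins more elements.
  //     std::pair<bool, std::size_t>
  //     _M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
  //                    std::size_t __n_ins) const
  //     {
  //       if (__n_elt + __n_ins > __n_bkt)
  //         return { true, _M_next_bkt(__n_elt + __n_ins) };
  //       return { false, 0 };
  //     }
  //
  //     _State _M_state() const { return 0; }
  //     void _M_reset(_State) { }
  //     void _M_reset() { }
  //   };
  // @endcode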
  // Definitions of class template _Hashtable's out-of-line member functions.
  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_bucket_begin(size_type __bkt) const
    -> __node_ptr
    {
      __node_base_ptr __n = _M_buckets[__bkt];
      return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
    }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(size_type __bkt_count_hint,
               const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
    : _Hashtable(__h, __eq, __a)
    {
      auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
      if (__bkt_count > _M_bucket_count)
        {
          _M_buckets = _M_allocate_buckets(__bkt_count);
          _M_bucket_count = __bkt_count;
        }
    }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _InputIterator>
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _Hashtable(_InputIterator __f, _InputIterator __l,
                 size_type __bkt_count_hint,
                 const _Hash& __h, const _Equal& __eq,
                 const allocator_type& __a, true_type /* __uks */)
      : _Hashtable(__bkt_count_hint, __h, __eq, __a)
      {
        for (; __f != __l; ++__f)
          this->insert(*__f);
      }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    template<typename _InputIterator>
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
                 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _Hashtable(_InputIterator __f, _InputIterator __l,
                 size_type __bkt_count_hint,
                 const _Hash& __h, const _Equal& __eq,
                 const allocator_type& __a, false_type /* __uks */)
      : _Hashtable(__h, __eq, __a)
      {
        auto __nb_elems = __detail::__distance_fw(__f, __l);
        auto __bkt_count =
          _M_rehash_policy._M_next_bkt(
            std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
                     __bkt_count_hint));

        if (__bkt_count > _M_bucket_count)
          {
            _M_buckets = _M_allocate_buckets(__bkt_count);
            _M_bucket_count = __bkt_count;
          }

        for (; __f != __l; ++__f)
          this->insert(*__f);
      }

  template<typename _Key, typename _Value, typename _Alloc,
           typename _ExtractKey, typename _Equal,
           typename _Hash, typename _RangeHash, typename _Unused,
           typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
               _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    operator=(const _Hashtable& __ht)
    -> _Hashtable&
    {
      if (&__ht == this)
        return *this;

      if (__node_alloc_traits::_S_propagate_on_copy_assign())
        {
          auto& __this_alloc = this->_M_node_allocator();
          auto& __that_alloc = __ht._M_node_allocator();
          if (!__node_alloc_traits::_S_always_equal()
              && __this_alloc != __that_alloc)
            {
              // Replacement allocator cannot free existing storage.
              this->_M_deallocate_nodes(_M_begin());
              _M_before_begin._M_nxt = nullptr;
              _M_deallocate_buckets();
              _M_buckets = nullptr;
              std::__alloc_on_copy(__this_alloc, __that_alloc);
              __hashtable_base::operator=(__ht);
              _M_bucket_count = __ht._M_bucket_count;
              _M_element_count = __ht._M_element_count;
              _M_rehash_policy = __ht._M_rehash_policy;
              __alloc_node_gen_t __alloc_node_gen(*this);
              __try
                {
                  _M_assign(__ht, __alloc_node_gen);
                }
              __catch(...)
                {
                  // _M_assign took care of deallocating all memory. Now we
                  // must make sure this instance remains in a usable state.
                  _M_reset();
                  __throw_exception_again;
                }
              return *this;
            }
          std::__alloc_on_copy(__this_alloc, __that_alloc);
        }

      // Reuse allocated buckets and nodes.
      _M_assign_elements(__ht);
      return *this;
    }
  1030. template<typename _Key, typename _Value, typename _Alloc,
  1031. typename _ExtractKey, typename _Equal,
  1032. typename _Hash, typename _RangeHash, typename _Unused,
  1033. typename _RehashPolicy, typename _Traits>
  1034. template<typename _Ht>
  1035. void
  1036. _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
  1037. _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
  1038. _M_assign_elements(_Ht&& __ht)
  1039. {
  1040. __buckets_ptr __former_buckets = nullptr;
  1041. std::size_t __former_bucket_count = _M_bucket_count;
  1042. const __rehash_state& __former_state = _M_rehash_policy._M_state();
  1043. if (_M_bucket_count != __ht._M_bucket_count)
  1044. {
  1045. __former_buckets = _M_buckets;
  1046. _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
  1047. _M_bucket_count = __ht._M_bucket_count;
  1048. }
  1049. else
  1050. __builtin_memset(_M_buckets, 0,
  1051. _M_bucket_count * sizeof(__node_base_ptr));
  1052. __try
  1053. {
  1054. __hashtable_base::operator=(std::forward<_Ht>(__ht));
  1055. _M_element_count = __ht._M_element_count;
  1056. _M_rehash_policy = __ht._M_rehash_policy;
  1057. __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
  1058. _M_before_begin._M_nxt = nullptr;
  1059. _M_assign(std::forward<_Ht>(__ht), __roan);
  1060. if (__former_buckets)
  1061. _M_deallocate_buckets(__former_buckets, __former_bucket_count);
  1062. }
  1063. __catch(...)
  1064. {
  1065. if (__former_buckets)
  1066. {
  1067. // Restore previous buckets.
  1068. _M_deallocate_buckets();
  1069. _M_rehash_policy._M_reset(__former_state);
  1070. _M_buckets = __former_buckets;
  1071. _M_bucket_count = __former_bucket_count;
  1072. }
  1073. __builtin_memset(_M_buckets, 0,
  1074. _M_bucket_count * sizeof(__node_base_ptr));
  1075. __throw_exception_again;
  1076. }
  1077. }
  1078. template<typename _Key, typename _Value, typename _Alloc,
  1079. typename _ExtractKey, typename _Equal,
  1080. typename _Hash, typename _RangeHash, typename _Unused,
  1081. typename _RehashPolicy, typename _Traits>
  1082. template<typename _Ht, typename _NodeGenerator>
  1083. void
  1084. _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
  1085. _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
  1086. _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
  1087. {
  1088. __buckets_ptr __buckets = nullptr;
  1089. if (!_M_buckets)
  1090. _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);
  1091. __try
  1092. {
  1093. if (!__ht._M_before_begin._M_nxt)
  1094. return;
  1095. // First deal with the special first node pointed to by
  1096. // _M_before_begin.
  1097. __node_ptr __ht_n = __ht._M_begin();
  1098. __node_ptr __this_n
  1099. = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
  1100. this->_M_copy_code(*__this_n, *__ht_n);
  1101. _M_update_bbegin(__this_n);
  1102. // Then deal with other nodes.
  1103. __node_ptr __prev_n = __this_n;
  1104. for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
  1105. {
  1106. __this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
  1107. __prev_n->_M_nxt = __this_n;
  1108. this->_M_copy_code(*__this_n, *__ht_n);
  1109. size_type __bkt = _M_bucket_index(*__this_n);
  1110. if (!_M_buckets[__bkt])
  1111. _M_buckets[__bkt] = __prev_n;
  1112. __prev_n = __this_n;
  1113. }
  1114. }
  1115. __catch(...)
  1116. {
  1117. clear();
  1118. if (__buckets)
  1119. _M_deallocate_buckets();
  1120. __throw_exception_again;
  1121. }
  1122. }
  1123. template<typename _Key, typename _Value, typename _Alloc,
  1124. typename _ExtractKey, typename _Equal,
  1125. typename _Hash, typename _RangeHash, typename _Unused,
  1126. typename _RehashPolicy, typename _Traits>
  1127. void
  1128. _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
  1129. _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
  1130. _M_reset() noexcept
  1131. {
  1132. _M_rehash_policy._M_reset();
  1133. _M_bucket_count = 1;
  1134. _M_single_bucket = nullptr;
  1135. _M_buckets = &_M_single_bucket;
  1136. _M_before_begin._M_nxt = nullptr;
  1137. _M_element_count = 0;
  1138. }
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_move_assign(_Hashtable&& __ht, true_type)
    {
      if (__builtin_expect(std::__addressof(__ht) == this, false))
	return;

      this->_M_deallocate_nodes(_M_begin());
      _M_deallocate_buckets();
      __hashtable_base::operator=(std::move(__ht));
      _M_rehash_policy = __ht._M_rehash_policy;
      if (!__ht._M_uses_single_bucket())
	_M_buckets = __ht._M_buckets;
      else
	{
	  _M_buckets = &_M_single_bucket;
	  _M_single_bucket = __ht._M_single_bucket;
	}

      _M_bucket_count = __ht._M_bucket_count;
      _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
      _M_element_count = __ht._M_element_count;
      std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());

      // Fix bucket containing the _M_before_begin pointer that can't be moved.
      _M_update_bbegin();
      __ht._M_reset();
    }

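  // Move assignment when the allocators might differ: delegate to the
  // overload above if they happen to compare equal, otherwise move the
  // elements one by one into nodes allocated by our own allocator.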
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_move_assign(_Hashtable&& __ht, false_type)
    {
      if (__ht._M_node_allocator() == this->_M_node_allocator())
	_M_move_assign(std::move(__ht), true_type{});
      else
	{
	  // Can't move memory, so move the elements instead.
	  _M_assign_elements(std::move(__ht));
	  __ht.clear();
	}
    }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(
	__node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
      _M_buckets(nullptr),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      __alloc_node_gen_t __alloc_node_gen(*this);
      _M_assign(__ht, __alloc_node_gen);
    }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
	       true_type /* alloc always equal */)
    noexcept(_S_nothrow_move())
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(std::move(__a)),
      _M_buckets(__ht._M_buckets),
      _M_bucket_count(__ht._M_bucket_count),
      _M_before_begin(__ht._M_before_begin._M_nxt),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      // Update buckets if __ht is using its single bucket.
      if (__ht._M_uses_single_bucket())
	{
	  _M_buckets = &_M_single_bucket;
	  _M_single_bucket = __ht._M_single_bucket;
	}

      // Fix bucket containing the _M_before_begin pointer that can't be moved.
      _M_update_bbegin();

      __ht._M_reset();
    }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(__node_alloc_type(__a)),
      _M_buckets(),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      __alloc_node_gen_t __alloc_node_gen(*this);
      _M_assign(__ht, __alloc_node_gen);
    }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
	       false_type /* alloc always equal */)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(std::move(__a)),
      _M_buckets(nullptr),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      if (__ht._M_node_allocator() == this->_M_node_allocator())
	{
	  if (__ht._M_uses_single_bucket())
	    {
	      _M_buckets = &_M_single_bucket;
	      _M_single_bucket = __ht._M_single_bucket;
	    }
	  else
	    _M_buckets = __ht._M_buckets;

	  // Fix bucket containing the _M_before_begin pointer that can't be
	  // moved.
	  _M_update_bbegin(__ht._M_begin());

	  __ht._M_reset();
	}
      else
	{
	  __alloc_node_gen_t __alloc_gen(*this);

	  using _Fwd_Ht = typename
	    conditional<__move_if_noexcept_cond<value_type>::value,
			const _Hashtable&, _Hashtable&&>::type;
	  _M_assign(std::forward<_Fwd_Ht>(__ht), __alloc_gen);
	  __ht.clear();
	}
    }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    ~_Hashtable() noexcept
    {
      clear();
      _M_deallocate_buckets();
    }

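  // Swap everything with __x: functors, rehash policy, bucket array, element
  // list and counts.  The embedded single-bucket storage cannot be swapped by
  // pointer, so that case is handled explicitly below.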
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    swap(_Hashtable& __x)
    noexcept(__and_<__is_nothrow_swappable<_Hash>,
		    __is_nothrow_swappable<_Equal>>::value)
    {
      // The only base class with member variables is hash_code_base.
      // We define _Hash_code_base::_M_swap because different
      // specializations have different members.
      this->_M_swap(__x);

      std::__alloc_on_swap(this->_M_node_allocator(), __x._M_node_allocator());
      std::swap(_M_rehash_policy, __x._M_rehash_policy);

      // Deal properly with potentially moved instances.
      if (this->_M_uses_single_bucket())
	{
	  if (!__x._M_uses_single_bucket())
	    {
	      _M_buckets = __x._M_buckets;
	      __x._M_buckets = &__x._M_single_bucket;
	    }
	}
      else if (__x._M_uses_single_bucket())
	{
	  __x._M_buckets = _M_buckets;
	  _M_buckets = &_M_single_bucket;
	}
      else
	std::swap(_M_buckets, __x._M_buckets);

      std::swap(_M_bucket_count, __x._M_bucket_count);
      std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
      std::swap(_M_element_count, __x._M_element_count);
      std::swap(_M_single_bucket, __x._M_single_bucket);

      // Fix buckets containing the _M_before_begin pointers that can't be
      // swapped.
      _M_update_bbegin();
      __x._M_update_bbegin();
    }

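  // Lookup: hash the key once, map the code to a bucket, then search that
  // bucket's slice of the singly-linked list for an equal key.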
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    find(const key_type& __k)
    -> iterator
    {
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__code);
      return iterator(_M_find_node(__bkt, __k, __code));
    }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    find(const key_type& __k) const
    -> const_iterator
    {
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__code);
      return const_iterator(_M_find_node(__bkt, __k, __code));
    }

#if __cplusplus > 201703L
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_tr(const _Kt& __k)
      -> iterator
      {
	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	return iterator(_M_find_node_tr(__bkt, __k, __code));
      }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_tr(const _Kt& __k) const
      -> const_iterator
      {
	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	return const_iterator(_M_find_node_tr(__bkt, __k, __code));
      }
#endif

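  // Count elements equal to __k.  With unique keys the answer is 0 or 1;
  // otherwise walk forward from the first match, since all equivalent
  // elements are stored contiguously.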
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    count(const key_type& __k) const
    -> size_type
    {
      auto __it = find(__k);
      if (!__it._M_cur)
	return 0;

      if (__unique_keys::value)
	return 1;

      // All equivalent values are next to each other; if we find a
      // non-equivalent value after an equivalent one, we won't find any
      // new equivalent value.
      size_type __result = 1;
      for (auto __ref = __it++;
	   __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
	   ++__it)
	++__result;

      return __result;
    }

#if __cplusplus > 201703L
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_count_tr(const _Kt& __k) const
      -> size_type
      {
	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	auto __n = _M_find_node_tr(__bkt, __k, __code);
	if (!__n)
	  return 0;

	// All equivalent values are next to each other; if we find a
	// non-equivalent value after an equivalent one, we won't find any
	// new equivalent value.
	iterator __it(__n);
	size_type __result = 1;
	for (++__it;
	     __it._M_cur && this->_M_equals_tr(__k, __code, *__it._M_cur);
	     ++__it)
	  ++__result;

	return __result;
      }
#endif

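  // equal_range: locate the first element equal to __k and extend the range
  // while successive nodes still compare equal, relying on equivalent
  // elements being adjacent in the list.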
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    equal_range(const key_type& __k)
    -> pair<iterator, iterator>
    {
      auto __ite = find(__k);
      if (!__ite._M_cur)
	return { __ite, __ite };

      auto __beg = __ite++;
      if (__unique_keys::value)
	return { __beg, __ite };

      // All equivalent values are next to each other; if we find a
      // non-equivalent value after an equivalent one, we won't find any
      // new equivalent value.
      while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
	++__ite;

      return { __beg, __ite };
    }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    equal_range(const key_type& __k) const
    -> pair<const_iterator, const_iterator>
    {
      auto __ite = find(__k);
      if (!__ite._M_cur)
	return { __ite, __ite };

      auto __beg = __ite++;
      if (__unique_keys::value)
	return { __beg, __ite };

      // All equivalent values are next to each other; if we find a
      // non-equivalent value after an equivalent one, we won't find any
      // new equivalent value.
      while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
	++__ite;

      return { __beg, __ite };
    }

#if __cplusplus > 201703L
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_equal_range_tr(const _Kt& __k)
      -> pair<iterator, iterator>
      {
	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	auto __n = _M_find_node_tr(__bkt, __k, __code);
	iterator __ite(__n);
	if (!__n)
	  return { __ite, __ite };

	// All equivalent values are next to each other; if we find a
	// non-equivalent value after an equivalent one, we won't find any
	// new equivalent value.
	auto __beg = __ite++;
	while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
	  ++__ite;

	return { __beg, __ite };
      }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_equal_range_tr(const _Kt& __k) const
      -> pair<const_iterator, const_iterator>
      {
	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	auto __n = _M_find_node_tr(__bkt, __k, __code);
	const_iterator __ite(__n);
	if (!__n)
	  return { __ite, __ite };

	// All equivalent values are next to each other; if we find a
	// non-equivalent value after an equivalent one, we won't find any
	// new equivalent value.
	auto __beg = __ite++;
	while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
	  ++__ite;

	return { __beg, __ite };
      }
#endif

  // Find the node before the one whose key compares equal to k in the bucket
  // bkt.  Return nullptr if no node is found.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_find_before_node(size_type __bkt, const key_type& __k,
			__hash_code __code) const
    -> __node_base_ptr
    {
      __node_base_ptr __prev_p = _M_buckets[__bkt];
      if (!__prev_p)
	return nullptr;

      for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
	   __p = __p->_M_next())
	{
	  if (this->_M_equals(__k, __code, *__p))
	    return __prev_p;

	  if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
	    break;
	  __prev_p = __p;
	}

      return nullptr;
    }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
			     __hash_code __code) const
      -> __node_base_ptr
      {
	__node_base_ptr __prev_p = _M_buckets[__bkt];
	if (!__prev_p)
	  return nullptr;

	for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
	     __p = __p->_M_next())
	  {
	    if (this->_M_equals_tr(__k, __code, *__p))
	      return __prev_p;

	    if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
	      break;
	    __prev_p = __p;
	  }

	return nullptr;
      }

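  // Link __node as the new first node of bucket __bkt.  All nodes live in a
  // single forward list headed by _M_before_begin, and _M_buckets[bkt] points
  // to the node *before* the bucket's first node, e.g.:
  //
  //   _M_before_begin -> n1 -> n2 -> n3 -> ...
  //   _M_buckets[bucket(n1)] == &_M_before_begin
  //   _M_buckets[bucket(n3)] == n2   (when n3 starts a new bucket)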
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
    {
      if (_M_buckets[__bkt])
	{
	  // The bucket is not empty; we just need to insert the new node
	  // after the bucket's before-begin node.
	  __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
	  _M_buckets[__bkt]->_M_nxt = __node;
	}
      else
	{
	  // The bucket is empty; the new node is inserted at the
	  // beginning of the singly-linked list and the bucket will
	  // contain the _M_before_begin pointer.
	  __node->_M_nxt = _M_before_begin._M_nxt;
	  _M_before_begin._M_nxt = __node;

	  if (__node->_M_nxt)
	    // We must update the former begin bucket that is pointing to
	    // _M_before_begin.
	    _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;

	  _M_buckets[__bkt] = &_M_before_begin;
	}
    }

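  // Called after removing the first node of bucket __bkt.  __next is the node
  // that followed it and __next_bkt is __next's bucket; if the bucket became
  // empty, hand its before-begin pointer over and clear the bucket entry.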
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_remove_bucket_begin(size_type __bkt, __node_ptr __next,
			   size_type __next_bkt)
    {
      if (!__next || __next_bkt != __bkt)
	{
	  // Bucket is now empty.
	  // First update next bucket if any.
	  if (__next)
	    _M_buckets[__next_bkt] = _M_buckets[__bkt];

	  // Second update before begin node if necessary.
	  if (&_M_before_begin == _M_buckets[__bkt])
	    _M_before_begin._M_nxt = __next;
	  _M_buckets[__bkt] = nullptr;
	}
    }

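  // Walk bucket __bkt to find the node whose _M_nxt is __n.  __n must belong
  // to that bucket, so the loop is bounded by the bucket length.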
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_get_previous_node(size_type __bkt, __node_ptr __n)
    -> __node_base_ptr
    {
      __node_base_ptr __prev_n = _M_buckets[__bkt];
      while (__prev_n->_M_nxt != __n)
	__prev_n = __prev_n->_M_nxt;
      return __prev_n;
    }

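  // Emplace for unique keys: the node is built first because the key is only
  // available once the value exists; if an equivalent key is already present
  // the new node is discarded by _Scoped_node.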
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_emplace(true_type /* __uks */, _Args&&... __args)
      -> pair<iterator, bool>
      {
	// First build the node to get access to the hash code
	_Scoped_node __node { this, std::forward<_Args>(__args)... };
	const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
	__hash_code __code = this->_M_hash_code(__k);
	size_type __bkt = _M_bucket_index(__code);
	if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
	  // There is already an equivalent node, no insertion
	  return std::make_pair(iterator(__p), false);

	// Insert the node
	auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
	__node._M_node = nullptr;
	return { __pos, true };
      }

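  // Emplace for equivalent keys: always inserts; the hint only helps pick the
  // insertion point when it refers to an equivalent element.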
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_emplace(const_iterator __hint, false_type /* __uks */,
		 _Args&&... __args)
      -> iterator
      {
	// First build the node to get its hash code.
	_Scoped_node __node { this, std::forward<_Args>(__args)... };
	const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());

	__hash_code __code = this->_M_hash_code(__k);
	auto __pos
	  = _M_insert_multi_node(__hint._M_cur, __code, __node._M_node);
	__node._M_node = nullptr;
	return __pos;
      }

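  // Insert an already-built node whose key is known to be absent.  The rehash
  // policy is consulted first so the node ends up in its final bucket;
  // __n_elt lets range insertions batch the growth decision.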
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_unique_node(size_type __bkt, __hash_code __code,
			  __node_ptr __node, size_type __n_elt)
    -> iterator
    {
      const __rehash_state& __saved_state = _M_rehash_policy._M_state();
      std::pair<bool, std::size_t> __do_rehash
	= _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
					  __n_elt);

      if (__do_rehash.first)
	{
	  _M_rehash(__do_rehash.second, __saved_state);
	  __bkt = _M_bucket_index(__code);
	}

      this->_M_store_code(*__node, __code);

      // Always insert at the beginning of the bucket.
      _M_insert_bucket_begin(__bkt, __node);
      ++_M_element_count;
      return iterator(__node);
    }

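  // Insert an already-built node that may have equivalent elements in the
  // table.  The node is linked right after an existing equivalent element
  // when there is one (keeping equivalents adjacent), otherwise at the
  // beginning of its bucket.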
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_multi_node(__node_ptr __hint,
			 __hash_code __code, __node_ptr __node)
    -> iterator
    {
      const __rehash_state& __saved_state = _M_rehash_policy._M_state();
      std::pair<bool, std::size_t> __do_rehash
	= _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);

      if (__do_rehash.first)
	_M_rehash(__do_rehash.second, __saved_state);

      this->_M_store_code(*__node, __code);
      const key_type& __k = _ExtractKey{}(__node->_M_v());
      size_type __bkt = _M_bucket_index(__code);

      // Find the node before an equivalent one or use hint if it exists and
      // if it is equivalent.
      __node_base_ptr __prev
	= __builtin_expect(__hint != nullptr, false)
	  && this->_M_equals(__k, __code, *__hint)
	? __hint
	: _M_find_before_node(__bkt, __k, __code);

      if (__prev)
	{
	  // Insert after the node before the equivalent one.
	  __node->_M_nxt = __prev->_M_nxt;
	  __prev->_M_nxt = __node;
	  if (__builtin_expect(__prev == __hint, false))
	    // The hint might be the last node of its bucket, in which case
	    // we need to update the next bucket.
	    if (__node->_M_nxt
		&& !this->_M_equals(__k, __code, *__node->_M_next()))
	      {
		size_type __next_bkt = _M_bucket_index(*__node->_M_next());
		if (__next_bkt != __bkt)
		  _M_buckets[__next_bkt] = __node;
	      }
	}
      else
	// The inserted node has no equivalent in the hashtable. We must
	// insert the new node at the beginning of the bucket to preserve
	// equivalent elements' relative positions.
	_M_insert_bucket_begin(__bkt, __node);

      ++_M_element_count;
      return iterator(__node);
    }

  // Insert v if no element with its key is already present.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Arg, typename _NodeGenerator>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_insert(_Arg&& __v, const _NodeGenerator& __node_gen,
		true_type /* __uks */)
      -> pair<iterator, bool>
      {
	const key_type& __k = _ExtractKey{}(__v);
	__hash_code __code = this->_M_hash_code(__k);
	size_type __bkt = _M_bucket_index(__code);

	if (__node_ptr __node = _M_find_node(__bkt, __k, __code))
	  return { iterator(__node), false };

	_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
	auto __pos
	  = _M_insert_unique_node(__bkt, __code, __node._M_node);
	__node._M_node = nullptr;
	return { __pos, true };
      }

  // Insert v unconditionally.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Arg, typename _NodeGenerator>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_insert(const_iterator __hint, _Arg&& __v,
		const _NodeGenerator& __node_gen,
		false_type /* __uks */)
      -> iterator
      {
	// First compute the hash code so that we don't do anything if it
	// throws.
	__hash_code __code = this->_M_hash_code(_ExtractKey{}(__v));

	// Second allocate new node so that we don't rehash if it throws.
	_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
	auto __pos
	  = _M_insert_multi_node(__hint._M_cur, __code, __node._M_node);
	__node._M_node = nullptr;
	return __pos;
      }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    erase(const_iterator __it)
    -> iterator
    {
      __node_ptr __n = __it._M_cur;
      std::size_t __bkt = _M_bucket_index(*__n);

      // Look for the previous node to unlink it from the erased one; this is
      // why buckets point to the node before their first element, which
      // makes this search fast.
      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
      return _M_erase(__bkt, __prev_n, __n);
    }

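  // Unlink and destroy __n, which lives in bucket __bkt and is preceded by
  // __prev_n, keeping the bucket entries of the neighbouring buckets
  // consistent.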
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
    -> iterator
    {
      if (__prev_n == _M_buckets[__bkt])
	_M_remove_bucket_begin(__bkt, __n->_M_next(),
	   __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
      else if (__n->_M_nxt)
	{
	  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
	  if (__next_bkt != __bkt)
	    _M_buckets[__next_bkt] = __prev_n;
	}

      __prev_n->_M_nxt = __n->_M_nxt;
      iterator __result(__n->_M_next());
      this->_M_deallocate_node(__n);
      --_M_element_count;

      return __result;
    }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(true_type /* __uks */, const key_type& __k)
    -> size_type
    {
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__code);

      // Look for the node before the first matching node.
      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
      if (!__prev_n)
	return 0;

      // We found a matching node, erase it.
      __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
      _M_erase(__bkt, __prev_n, __n);
      return 1;
    }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(false_type /* __uks */, const key_type& __k)
    -> size_type
    {
      __hash_code __code = this->_M_hash_code(__k);
      std::size_t __bkt = _M_bucket_index(__code);

      // Look for the node before the first matching node.
      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
      if (!__prev_n)
	return 0;

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 526. Is it undefined if a function in the standard changes
      // in parameters?
      // We use one loop to find all matching nodes and another to deallocate
      // them so that the key stays valid during the first loop. It might be
      // invalidated indirectly when destroying nodes.
      __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
      __node_ptr __n_last = __n->_M_next();
      while (__n_last && this->_M_node_equals(*__n, *__n_last))
	__n_last = __n_last->_M_next();

      std::size_t __n_last_bkt = __n_last ? _M_bucket_index(*__n_last) : __bkt;

      // Deallocate nodes.
      size_type __result = 0;
      do
	{
	  __node_ptr __p = __n->_M_next();
	  this->_M_deallocate_node(__n);
	  __n = __p;
	  ++__result;
	}
      while (__n != __n_last);

      _M_element_count -= __result;
      if (__prev_n == _M_buckets[__bkt])
	_M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
      else if (__n_last_bkt != __bkt)
	_M_buckets[__n_last_bkt] = __prev_n;
      __prev_n->_M_nxt = __n_last;
      return __result;
    }

  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    erase(const_iterator __first, const_iterator __last)
    -> iterator
    {
      __node_ptr __n = __first._M_cur;
      __node_ptr __last_n = __last._M_cur;
      if (__n == __last_n)
	return iterator(__n);

      std::size_t __bkt = _M_bucket_index(*__n);

      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
      bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
      std::size_t __n_bkt = __bkt;
      for (;;)
	{
	  do
	    {
	      __node_ptr __tmp = __n;
	      __n = __n->_M_next();
	      this->_M_deallocate_node(__tmp);
	      --_M_element_count;
	      if (!__n)
		break;
	      __n_bkt = _M_bucket_index(*__n);
	    }
	  while (__n != __last_n && __n_bkt == __bkt);
	  if (__is_bucket_begin)
	    _M_remove_bucket_begin(__bkt, __n, __n_bkt);
	  if (__n == __last_n)
	    break;
	  __is_bucket_begin = true;
	  __bkt = __n_bkt;
	}

      if (__n && (__n_bkt != __bkt || __is_bucket_begin))
	_M_buckets[__n_bkt] = __prev_n;
      __prev_n->_M_nxt = __n;
      return iterator(__n);
    }

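  // Destroy every node and zero the bucket array; the bucket array itself is
  // kept so the container stays usable without reallocation.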
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    clear() noexcept
    {
      this->_M_deallocate_nodes(_M_begin());
      __builtin_memset(_M_buckets, 0,
		       _M_bucket_count * sizeof(__node_base_ptr));
      _M_element_count = 0;
      _M_before_begin._M_nxt = nullptr;
    }

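  // Public rehash: never shrink below what the policy requires for the
  // current element count, round the request up to the policy's next bucket
  // count, then delegate to _M_rehash if the count actually changes.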
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    rehash(size_type __bkt_count)
    {
      const __rehash_state& __saved_state = _M_rehash_policy._M_state();
      __bkt_count
	= std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
		   __bkt_count);
      __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);

      if (__bkt_count != _M_bucket_count)
	_M_rehash(__bkt_count, __saved_state);
      else
	// No rehash, restore previous state to keep it consistent with
	// container state.
	_M_rehash_policy._M_reset(__saved_state);
    }

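  // Rehash to __bkt_count buckets, restoring the saved policy state if the
  // bucket allocation throws so the policy stays consistent with the
  // unchanged container.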
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash(size_type __bkt_count, const __rehash_state& __state)
    {
      __try
	{
	  _M_rehash_aux(__bkt_count, __unique_keys{});
	}
      __catch(...)
	{
	  // A failure here means that bucket allocation failed.  We only
	  // have to restore the previous hash policy state.
	  _M_rehash_policy._M_reset(__state);
	  __throw_exception_again;
	}
    }

  // Rehash when there are no equivalent elements.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
    {
      __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
      __node_ptr __p = _M_begin();
      _M_before_begin._M_nxt = nullptr;
      std::size_t __bbegin_bkt = 0;
      while (__p)
	{
	  __node_ptr __next = __p->_M_next();
	  std::size_t __bkt
	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
	  if (!__new_buckets[__bkt])
	    {
	      __p->_M_nxt = _M_before_begin._M_nxt;
	      _M_before_begin._M_nxt = __p;
	      __new_buckets[__bkt] = &_M_before_begin;
	      if (__p->_M_nxt)
		__new_buckets[__bbegin_bkt] = __p;
	      __bbegin_bkt = __bkt;
	    }
	  else
	    {
	      __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
	      __new_buckets[__bkt]->_M_nxt = __p;
	    }

	  __p = __next;
	}

      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }

  // Rehash when there can be equivalent elements, preserving their relative
  // order.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
    {
      __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
      __node_ptr __p = _M_begin();
      _M_before_begin._M_nxt = nullptr;
      std::size_t __bbegin_bkt = 0;
      std::size_t __prev_bkt = 0;
      __node_ptr __prev_p = nullptr;
      bool __check_bucket = false;

      while (__p)
	{
	  __node_ptr __next = __p->_M_next();
	  std::size_t __bkt
	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);

	  if (__prev_p && __prev_bkt == __bkt)
	    {
	      // The previous insertion was already in this bucket; we insert
	      // after the previously inserted node to preserve the relative
	      // order of equivalent elements.
	      __p->_M_nxt = __prev_p->_M_nxt;
	      __prev_p->_M_nxt = __p;

	      // Inserting after a node within a bucket requires checking
	      // whether we changed the bucket's last node, in which case the
	      // next bucket, whose entry points to its before-begin node,
	      // must be updated.  We schedule a check as soon as we move out
	      // of the sequence of equivalent nodes to limit the number of
	      // checks.
	      __check_bucket = true;
	    }
	  else
	    {
	      if (__check_bucket)
		{
		  // Check whether we need to update the next bucket because
		  // of insertions into the __prev_bkt bucket.
		  if (__prev_p->_M_nxt)
		    {
		      std::size_t __next_bkt
			= __hash_code_base::_M_bucket_index(
			    *__prev_p->_M_next(), __bkt_count);
		      if (__next_bkt != __prev_bkt)
			__new_buckets[__next_bkt] = __prev_p;
		    }
		  __check_bucket = false;
		}

	      if (!__new_buckets[__bkt])
		{
		  __p->_M_nxt = _M_before_begin._M_nxt;
		  _M_before_begin._M_nxt = __p;
		  __new_buckets[__bkt] = &_M_before_begin;
		  if (__p->_M_nxt)
		    __new_buckets[__bbegin_bkt] = __p;
		  __bbegin_bkt = __bkt;
		}
	      else
		{
		  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
		  __new_buckets[__bkt]->_M_nxt = __p;
		}
	    }
	  __prev_p = __p;
	  __prev_bkt = __bkt;
	  __p = __next;
	}

      if (__check_bucket && __prev_p->_M_nxt)
	{
	  std::size_t __next_bkt
	    = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
						__bkt_count);
	  if (__next_bkt != __prev_bkt)
	    __new_buckets[__next_bkt] = __prev_p;
	}

      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }

#if __cplusplus > 201402L
  template<typename, typename, typename> class _Hash_merge_helper { };
#endif // C++17

#if __cpp_deduction_guides >= 201606
  // Used to constrain deduction guides
  template<typename _Hash>
    using _RequireNotAllocatorOrIntegral
      = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
#endif

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _HASHTABLE_H