1 /*
2     Copyright (c) 2005-2022 Intel Corporation
3 
4     Licensed under the Apache License, Version 2.0 (the "License");
5     you may not use this file except in compliance with the License.
6     You may obtain a copy of the License at
7 
8         http://www.apache.org/licenses/LICENSE-2.0
9 
10     Unless required by applicable law or agreed to in writing, software
11     distributed under the License is distributed on an "AS IS" BASIS,
12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13     See the License for the specific language governing permissions and
14     limitations under the License.
15 */
16 
17 #ifndef __TBB_concurrent_hash_map_H
18 #define __TBB_concurrent_hash_map_H
19 
20 #include "detail/_namespace_injection.h"
21 #include "detail/_utils.h"
22 #include "detail/_assert.h"
23 #include "detail/_allocator_traits.h"
24 #include "detail/_containers_helpers.h"
25 #include "detail/_template_helpers.h"
26 #include "detail/_hash_compare.h"
27 #include "detail/_range_common.h"
28 #include "tbb_allocator.h"
29 #include "spin_rw_mutex.h"
30 
31 #include <atomic>
32 #include <initializer_list>
33 #include <tuple>
34 #include <iterator>
35 #include <utility>      // Need std::pair
36 #include <cstring>      // Need std::memset
37 
38 namespace tbb {
39 namespace detail {
40 namespace d2 {
41 
42 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS && __TBB_CPP20_CONCEPTS_PRESENT
43 template <typename Mutex>
44 concept ch_map_rw_scoped_lockable = rw_scoped_lockable<Mutex> &&
45     requires(const typename Mutex::scoped_lock& sl) {
46 		{ sl.is_writer() } -> std::convertible_to<bool>;
47 };
48 #endif
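// With the preview extension enabled, concurrent_hash_map can be instantiated with a
// user-provided mutex. The concept above adds a single requirement on top of the
// reader-writer scoped-lock interface checked by rw_scoped_lockable: the scoped_lock
// must expose a const is_writer() convertible to bool. A rough outline of such a type
// is sketched below; the exact member set demanded by rw_scoped_lockable is defined
// with that concept, and spin_rw_mutex (the default) already models it.
//
//   class my_rw_mutex {
//   public:
//       class scoped_lock {
//       public:
//           scoped_lock();
//           scoped_lock( my_rw_mutex& m, bool write = true );
//           ~scoped_lock();
//           void acquire( my_rw_mutex& m, bool write = true );
//           bool try_acquire( my_rw_mutex& m, bool write = true );
//           void release();
//           bool upgrade_to_writer();
//           bool downgrade_to_reader();
//           bool is_writer() const;   // the extra requirement checked above
//       };
//   };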
49 
50 template <typename MutexType>
51 struct hash_map_node_base : no_copy {
52     using mutex_type = MutexType;
53     // Scoped lock type for mutex
54     using scoped_type = typename MutexType::scoped_lock;
55     // Next node in chain
56     hash_map_node_base* next;
57     mutex_type mutex;
58 };
59 
60 // Incompleteness flag value
61 static void* const rehash_req_flag = reinterpret_cast<void*>(std::size_t(3));
62 // Rehashed empty bucket flag
63 static void* const empty_rehashed_flag = reinterpret_cast<void*>(std::size_t(0));
64 
65 template <typename MutexType>
66 bool rehash_required( hash_map_node_base<MutexType>* node_ptr ) {
67     return reinterpret_cast<void*>(node_ptr) == rehash_req_flag;
68 }
69 
70 #if TBB_USE_ASSERT
71 template <typename MutexType>
72 bool empty_rehashed( hash_map_node_base<MutexType>* node_ptr ) {
73     return reinterpret_cast<void*>(node_ptr) == empty_rehashed_flag;
74 }
75 #endif
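// The two flag values above share a bucket's node_list field with real node pointers:
// nullptr/empty_rehashed_flag marks a bucket that is empty and already rehashed, while
// rehash_req_flag marks a bucket whose elements still live in its parent bucket and are
// pulled in lazily. is_valid() in hash_map_base below tells a genuine pointer apart from
// these flags by treating every value not greater than 63 as a flag, e.g.:
//
//   node_base* n = b->node_list.load(std::memory_order_relaxed);
//   if (rehash_required(n))   { /* contents still reside in the parent bucket */ }
//   else if (is_valid(n))     { /* n is the head of this bucket's node chain   */ }
//   else                      { /* bucket is empty and already rehashed        */ }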
76 
77 // base class of concurrent_hash_map
78 
79 template <typename Allocator, typename MutexType>
80 class hash_map_base {
81 public:
82     using size_type = std::size_t;
83     using hashcode_type = std::size_t;
84     using segment_index_type = std::size_t;
85     using node_base = hash_map_node_base<MutexType>;
86 
87     struct bucket : no_copy {
88         using mutex_type = MutexType;
89         using scoped_type = typename mutex_type::scoped_lock;
90 
91         bucket() : node_list(nullptr) {}
92         bucket( node_base* ptr ) : node_list(ptr) {}
93 
94         mutex_type mutex;
95         std::atomic<node_base*> node_list;
96     };
97 
98     using allocator_type = Allocator;
99     using allocator_traits_type = tbb::detail::allocator_traits<allocator_type>;
100     using bucket_allocator_type = typename allocator_traits_type::template rebind_alloc<bucket>;
101     using bucket_allocator_traits = tbb::detail::allocator_traits<bucket_allocator_type>;
102 
103     // Count of segments stored embedded in the table
104     static constexpr size_type embedded_block = 1;
105     // Count of buckets in the embedded segments
106     static constexpr size_type embedded_buckets = 1 << embedded_block;
107     // Count of segments in the first block
108     static constexpr size_type first_block = 8; //including embedded_block. perfect with bucket size 16, so the allocations are power of 4096
109     // Size of a pointer / table size
110     static constexpr size_type pointers_per_table = sizeof(segment_index_type) * 8; // one segment per bit
111 
112     using segment_ptr_type = bucket*;
113     using atomic_segment_type = std::atomic<segment_ptr_type>;
114     using segments_table_type = atomic_segment_type[pointers_per_table];
115 
116     hash_map_base( const allocator_type& alloc ) : my_allocator(alloc), my_mask(embedded_buckets - 1), my_size(0) {
117         for (size_type i = 0; i != embedded_buckets; ++i) {
118             my_embedded_segment[i].node_list.store(nullptr, std::memory_order_relaxed);
119         }
120 
121         for (size_type segment_index = 0; segment_index < pointers_per_table; ++segment_index) {
122             auto argument = segment_index < embedded_block ? my_embedded_segment + segment_base(segment_index) : nullptr;
123             my_table[segment_index].store(argument, std::memory_order_relaxed);
124         }
125 
126         __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
127     }
128 
129     // segment index of given index in the array
130     static segment_index_type segment_index_of( size_type index ) {
131         return segment_index_type(tbb::detail::log2( index|1 ));
132     }
133 
134     // the first array index of given segment
135     static segment_index_type segment_base( segment_index_type k ) {
136         return (segment_index_type(1) << k & ~segment_index_type(1));
137     }
138 
139     // segment size except for k == 0
140     static size_type segment_size( segment_index_type k ) {
141         return size_type(1) << k; // fake value for k==0
142     }
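// How bucket indexes map onto segments with these helpers (embedded_block == 1):
//
//   bucket index        segment_index_of   segment_base   buckets in segment
//   0, 1                0                  0              2   (the embedded segment)
//   2, 3                1                  2              2
//   4 .. 7              2                  4              4
//   8 .. 15             3                  8              8
//   2^k .. 2^(k+1)-1    k                  2^k            2^k
//
// segment_size(0) deliberately returns the "fake" value 1; the embedded segment
// actually holds embedded_buckets == 2 buckets.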
143 
144     // True if ptr is a valid node pointer rather than a small flag value
145     static bool is_valid( void* ptr ) {
146         return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
147     }
148 
149     template <typename... Args>
150     void init_buckets_impl( segment_ptr_type ptr, size_type sz, const Args&... args ) {
151         for (size_type i = 0; i < sz; ++i) {
152             bucket_allocator_traits::construct(my_allocator, ptr + i, args...);
153         }
154     }
155 
156     // Initialize buckets
157     void init_buckets( segment_ptr_type ptr, size_type sz, bool is_initial ) {
158         if (is_initial) {
159             init_buckets_impl(ptr, sz);
160         } else {
161             init_buckets_impl(ptr, sz, reinterpret_cast<node_base*>(rehash_req_flag));
162         }
163     }
164 
165     // Add node n to bucket b
166     static void add_to_bucket( bucket* b, node_base* n ) {
167         __TBB_ASSERT(!rehash_required(b->node_list.load(std::memory_order_relaxed)), nullptr);
168         n->next = b->node_list.load(std::memory_order_relaxed);
169         b->node_list.store(n, std::memory_order_relaxed); // it's under the lock and the flag is set
170     }
171 
172     const bucket_allocator_type& get_allocator() const {
173         return my_allocator;
174     }
175 
176     bucket_allocator_type& get_allocator() {
177         return my_allocator;
178     }
179 
180     // Enable segment
181     void enable_segment( segment_index_type k, bool is_initial = false ) {
182         __TBB_ASSERT( k, "Zero segment must be embedded" );
183         size_type sz;
184         __TBB_ASSERT( !is_valid(my_table[k].load(std::memory_order_relaxed)), "Wrong concurrent assignment");
185         if (k >= first_block) {
186             sz = segment_size(k);
187             segment_ptr_type ptr = nullptr;
188             try_call( [&] {
189                 ptr = bucket_allocator_traits::allocate(my_allocator, sz);
190             } ).on_exception( [&] {
191                 my_table[k].store(nullptr, std::memory_order_relaxed);
192             });
193 
194             __TBB_ASSERT(ptr, nullptr);
195             init_buckets(ptr, sz, is_initial);
196             my_table[k].store(ptr, std::memory_order_release);
197             sz <<= 1;// double it to get entire capacity of the container
198         } else { // the first block
199             __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
200             sz = segment_size(first_block);
201             segment_ptr_type ptr = nullptr;
202             try_call( [&] {
203                 ptr = bucket_allocator_traits::allocate(my_allocator, sz - embedded_buckets);
204             } ).on_exception( [&] {
205                 my_table[k].store(nullptr, std::memory_order_relaxed);
206             });
207 
208             __TBB_ASSERT(ptr, nullptr);
209             init_buckets(ptr, sz - embedded_buckets, is_initial);
210             ptr -= segment_base(embedded_block);
211             for(segment_index_type i = embedded_block; i < first_block; i++) // calc the offsets
212                 my_table[i].store(ptr + segment_base(i), std::memory_order_release);
213         }
214         my_mask.store(sz-1, std::memory_order_release);
215     }
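// Growth pattern implied by this function (embedded_block == 1, first_block == 8):
// enable_segment(1) handles the whole first block at once: it allocates
// segment_size(first_block) - embedded_buckets == 256 - 2 == 254 buckets, points
// my_table[1..7] into that array, and publishes my_mask == 255. For k >= 8,
// enable_segment(k) allocates segment_size(k) == 2^k buckets and publishes
// my_mask == 2^(k+1) - 1, so each call doubles the bucket count (k == 8 gives
// mask 511, k == 9 gives mask 1023, and so on).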
216 
217     void delete_segment( segment_index_type s ) {
218         segment_ptr_type buckets_ptr = my_table[s].load(std::memory_order_relaxed);
219         size_type sz = segment_size( s ? s : 1 );
220 
221         size_type deallocate_size = 0;
222 
223         if (s >= first_block) { // the first segment or the next
224             deallocate_size = sz;
225         } else if (s == embedded_block && embedded_block != first_block) {
226             deallocate_size = segment_size(first_block) - embedded_buckets;
227         }
228 
229         for (size_type i = 0; i < deallocate_size; ++i) {
230             bucket_allocator_traits::destroy(my_allocator, buckets_ptr + i);
231         }
232         if (deallocate_size != 0) {
233             bucket_allocator_traits::deallocate(my_allocator, buckets_ptr, deallocate_size);
234         }
235 
236         if (s >= embedded_block) my_table[s].store(nullptr, std::memory_order_relaxed);
237     }
238 
239     // Get bucket by (masked) hashcode
240     bucket *get_bucket( hashcode_type h ) const noexcept {
241         segment_index_type s = segment_index_of( h );
242         h -= segment_base(s);
243         segment_ptr_type seg = my_table[s].load(std::memory_order_acquire);
244         __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
245         return &seg[h];
246     }
247 
248     // detail serial rehashing helper
249     void mark_rehashed_levels( hashcode_type h ) noexcept {
250         segment_index_type s = segment_index_of( h );
251         while (segment_ptr_type seg = my_table[++s].load(std::memory_order_relaxed))
252             if (rehash_required(seg[h].node_list.load(std::memory_order_relaxed))) {
253                 seg[h].node_list.store(reinterpret_cast<node_base*>(empty_rehashed_flag), std::memory_order_relaxed);
254                 mark_rehashed_levels( h + ((hashcode_type)1<<s) ); // optimized segment_base(s)
255             }
256     }
257 
258     // Check for mask race
259     // Splitting into two functions should help inlining
260     inline bool check_mask_race( const hashcode_type h, hashcode_type &m ) const {
261         hashcode_type m_now, m_old = m;
262         m_now = my_mask.load(std::memory_order_acquire);
263         if (m_old != m_now) {
264             return check_rehashing_collision(h, m_old, m = m_now);
265         }
266         return false;
267     }
268 
269     // Process mask race, check for rehashing collision
270     bool check_rehashing_collision( const hashcode_type h, hashcode_type m_old, hashcode_type m ) const {
271         __TBB_ASSERT(m_old != m, nullptr); // TODO?: m arg could be optimized out by passing h = h&m
272         if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
273             // condition above proves that 'h' has some other bits set beside 'm_old'
274             // find next applicable mask after m_old    //TODO: look at bsl instruction
275             for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size
276                 ;
277             m_old = (m_old<<1) - 1; // get full mask from a bit
278             __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, nullptr);
279             // check whether it is rehashing/ed
280             if (!rehash_required(get_bucket(h & m_old)->node_list.load(std::memory_order_acquire))) {
281                 return true;
282             }
283         }
284         return false;
285     }
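// A worked example of the race handled here: a thread hashes a key to h == 0x3A7
// while my_mask is 0xFF, searches bucket h & 0xFF == 0xA7 and misses. Meanwhile the
// table grows and my_mask becomes 0x3FF. check_mask_race() notices the change and,
// since h & 0xFF != h & 0x3FF, check_rehashing_collision() advances m_old to the next
// mask that exposes a set bit of h, here 0x1FF. If bucket h & 0x1FF is no longer
// marked rehash_required, the key may already have been moved there, so the function
// returns true and the caller restarts its search with the refreshed mask.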
286 
287     // Insert a node and check for load factor. @return segment index to enable.
288     segment_index_type insert_new_node( bucket *b, node_base *n, hashcode_type mask ) {
289         size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
290         add_to_bucket( b, n );
291         // check load factor
292         if( sz >= mask ) { // TODO: add custom load_factor
293             segment_index_type new_seg = tbb::detail::log2( mask+1 ); //optimized segment_index_of
294             __TBB_ASSERT( is_valid(my_table[new_seg-1].load(std::memory_order_relaxed)), "new allocations must not publish new mask until segment has allocated");
295             static const segment_ptr_type is_allocating = segment_ptr_type(2);
296             segment_ptr_type disabled = nullptr;
297             if (!(my_table[new_seg].load(std::memory_order_acquire))
298                 && my_table[new_seg].compare_exchange_strong(disabled, is_allocating))
299                 return new_seg; // The value must be processed
300         }
301         return 0;
302     }
303 
304     // Prepare enough segments for number of buckets
305     void reserve(size_type buckets) {
306         if( !buckets-- ) return;
307         bool is_initial = !my_size.load(std::memory_order_relaxed);
308         for (size_type m = my_mask.load(std::memory_order_relaxed); buckets > m;
309             m = my_mask.load(std::memory_order_relaxed))
310         {
311             enable_segment( segment_index_of( m+1 ), is_initial );
312         }
313     }
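// Example: on a fresh table (my_mask == 1) a call to reserve(1000) decrements the
// argument to 999 and then compares it against the growing mask:
//   999 > 1    -> enable_segment(segment_index_of(2))   i.e. enable_segment(1), mask becomes 255
//   999 > 255  -> enable_segment(segment_index_of(256)) i.e. enable_segment(8), mask becomes 511
//   999 > 511  -> enable_segment(segment_index_of(512)) i.e. enable_segment(9), mask becomes 1023
//   999 > 1023 fails, so the loop stops with 1024 buckets prepared.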
314 
315     // Swap hash_map_bases
316     void internal_swap_content(hash_map_base &table) {
317         using std::swap;
318         swap_atomics_relaxed(my_mask, table.my_mask);
319         swap_atomics_relaxed(my_size, table.my_size);
320 
321         for(size_type i = 0; i < embedded_buckets; i++) {
322             auto temp = my_embedded_segment[i].node_list.load(std::memory_order_relaxed);
323             my_embedded_segment[i].node_list.store(table.my_embedded_segment[i].node_list.load(std::memory_order_relaxed),
324                 std::memory_order_relaxed);
325             table.my_embedded_segment[i].node_list.store(temp, std::memory_order_relaxed);
326         }
327         for(size_type i = embedded_block; i < pointers_per_table; i++) {
328             auto temp = my_table[i].load(std::memory_order_relaxed);
329             my_table[i].store(table.my_table[i].load(std::memory_order_relaxed),
330                 std::memory_order_relaxed);
331             table.my_table[i].store(temp, std::memory_order_relaxed);
332         }
333     }
334 
335     void internal_move(hash_map_base&& other) {
336         my_mask.store(other.my_mask.load(std::memory_order_relaxed), std::memory_order_relaxed);
337         other.my_mask.store(embedded_buckets - 1, std::memory_order_relaxed);
338 
339         my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
340         other.my_size.store(0, std::memory_order_relaxed);
341 
342         for (size_type i = 0; i < embedded_buckets; ++i) {
343             my_embedded_segment[i].node_list.store(other.my_embedded_segment[i].node_list, std::memory_order_relaxed);
344             other.my_embedded_segment[i].node_list.store(nullptr, std::memory_order_relaxed);
345         }
346 
347         for (size_type i = embedded_block; i < pointers_per_table; ++i) {
348             my_table[i].store(other.my_table[i].load(std::memory_order_relaxed),
349                 std::memory_order_relaxed);
350             other.my_table[i].store(nullptr, std::memory_order_relaxed);
351         }
352     }
353 
354 protected:
355     bucket_allocator_type my_allocator;
356     // Hash mask = sum of allocated segment sizes - 1
357     std::atomic<hashcode_type> my_mask;
358     // Size of container in stored items
359     std::atomic<size_type> my_size; // It must be in separate cache line from my_mask due to performance effects
360     // Zero segment
361     bucket my_embedded_segment[embedded_buckets];
362     // Segment pointers table. Also prevents false sharing between my_mask and my_size
363     segments_table_type my_table;
364 };
365 
366 template <typename Iterator>
367 class hash_map_range;
368 
369 // Meets requirements of a forward iterator for STL
370 // Value is either the T or const T type of the container.
371 template <typename Container, typename Value>
372 class hash_map_iterator {
373     using map_type = Container;
374     using node = typename Container::node;
375     using map_base = typename Container::base_type;
376     using node_base = typename map_base::node_base;
377     using bucket = typename map_base::bucket;
378 public:
379     using value_type = Value;
380     using size_type = typename Container::size_type;
381     using difference_type = typename Container::difference_type;
382     using pointer = value_type*;
383     using reference = value_type&;
384     using iterator_category = std::forward_iterator_tag;
385 
386     // Construct undefined iterator
387     hash_map_iterator(): my_map(), my_index(), my_bucket(), my_node() {}
388     hash_map_iterator( const hash_map_iterator<Container, typename Container::value_type>& other ) :
389         my_map(other.my_map),
390         my_index(other.my_index),
391         my_bucket(other.my_bucket),
392         my_node(other.my_node)
393     {}
394 
395     hash_map_iterator& operator=( const hash_map_iterator<Container, typename Container::value_type>& other ) {
396         my_map = other.my_map;
397         my_index = other.my_index;
398         my_bucket = other.my_bucket;
399         my_node = other.my_node;
400         return *this;
401     }
402 
403     Value& operator*() const {
404         __TBB_ASSERT( map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
405         return my_node->value();
406     }
407 
408     Value* operator->() const {return &operator*();}
409 
410     hash_map_iterator& operator++() {
411         my_node = static_cast<node*>( my_node->next );
412         if( !my_node ) advance_to_next_bucket();
413         return *this;
414     }
415 
416     // Post increment
417     hash_map_iterator operator++(int) {
418         hash_map_iterator old(*this);
419         operator++();
420         return old;
421     }
422 private:
423     template <typename C, typename T, typename U>
424     friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
425 
426     template <typename C, typename T, typename U>
427     friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
428 
429     template <typename C, typename T, typename U>
430     friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
431 
432     template <typename C, typename U>
433     friend class hash_map_iterator;
434 
435     template <typename I>
436     friend class hash_map_range;
437 
438     void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
439         size_t k = my_index+1;
440         __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
441         while (k <= my_map->my_mask.load(std::memory_order_relaxed)) {
442             // Following test uses 2's-complement wizardry
443             if( k&(k-2) ) // not the beginning of a segment
444                 ++my_bucket;
445             else my_bucket = my_map->get_bucket( k );
446             node_base *n = my_bucket->node_list.load(std::memory_order_relaxed);
447             if( map_base::is_valid(n) ) {
448                 my_node = static_cast<node*>(n);
449                 my_index = k;
450                 return;
451             }
452             ++k;
453         }
454         my_bucket = nullptr; my_node = nullptr; my_index = k; // the end
455     }
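// Why k&(k-2) detects the start of a segment: segment_base() values are
// 0, 2, 4, 8, 16, ..., so a bucket index k >= 1 opens a new segment exactly when it
// equals 2 or a larger power of two. For those values k-2 clears every bit of k
// (2&0 == 0, 4&2 == 0, 8&6 == 0, ...), forcing a fresh get_bucket() call, while for an
// in-segment index some bit survives (e.g. 6&4 == 4, 12&10 == 8) and the iterator can
// simply step to the adjacent bucket of the same contiguous segment.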
456 
457     template <typename Key, typename T, typename HashCompare, typename A
458 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
459             , typename M
460              >
461         __TBB_requires(tbb::detail::hash_compare<HashCompare, Key> &&
462                        ch_map_rw_scoped_lockable<M>)
463 #else
464              >
465         __TBB_requires(tbb::detail::hash_compare<HashCompare, Key>)
466 #endif
467     friend class concurrent_hash_map;
468 
469     hash_map_iterator( const Container &map, std::size_t index, const bucket *b, node_base *n ) :
470         my_map(&map), my_index(index), my_bucket(b), my_node(static_cast<node*>(n))
471     {
472         if( b && !map_base::is_valid(n) )
473             advance_to_next_bucket();
474     }
475 
476     // concurrent_hash_map over which we are iterating.
477     const Container *my_map;
478     // Index in hash table for current item
479     size_t my_index;
480     // Pointer to bucket
481     const bucket* my_bucket;
482     // Pointer to node that has current item
483     node* my_node;
484 };
485 
486 template <typename Container, typename T, typename U>
487 bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
488     return i.my_node == j.my_node && i.my_map == j.my_map;
489 }
490 
491 template <typename Container, typename T, typename U>
492 bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
493     return i.my_node != j.my_node || i.my_map != j.my_map;
494 }
495 
496 // Range class used with concurrent_hash_map
497 template <typename Iterator>
498 class hash_map_range {
499     using map_type = typename Iterator::map_type;
500 public:
501     // Type for size of a range
502     using size_type = std::size_t;
503     using value_type = typename Iterator::value_type;
504     using reference = typename Iterator::reference;
505     using difference_type = typename Iterator::difference_type;
506     using iterator = Iterator;
507 
508     // True if range is empty.
509     bool empty() const { return my_begin == my_end; }
510 
511     // True if range can be partitioned into two subranges.
512     bool is_divisible() const {
513         return my_midpoint != my_end;
514     }
515 
516     // Split range.
517     hash_map_range( hash_map_range& r, split ) :
518         my_end(r.my_end),
519         my_grainsize(r.my_grainsize)
520     {
521         r.my_end = my_begin = r.my_midpoint;
522         __TBB_ASSERT( !empty(), "Splitting even though the range is not divisible" );
523         __TBB_ASSERT( !r.empty(), "Splitting even though the range is not divisible" );
524         set_midpoint();
525         r.set_midpoint();
526     }
527 
528     // Init range with container and grainsize specified
529     hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
530         my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list.load(std::memory_order_relaxed) ) ),
531         my_end( Iterator( map, map.my_mask.load(std::memory_order_relaxed) + 1, nullptr, nullptr ) ),
532         my_grainsize( grainsize_ )
533     {
534         __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
535         set_midpoint();
536     }
537 
538     Iterator begin() const { return my_begin; }
539     Iterator end() const { return my_end; }
540     // The grain size for this range.
541     size_type grainsize() const { return my_grainsize; }
542 
543 private:
544     Iterator my_begin;
545     Iterator my_end;
546     mutable Iterator my_midpoint;
547     size_t my_grainsize;
548     // Set my_midpoint to point approximately half way between my_begin and my_end.
549     void set_midpoint() const;
550     template <typename U> friend class hash_map_range;
551 };
552 
553 template <typename Iterator>
554 void hash_map_range<Iterator>::set_midpoint() const {
555     // Split by groups of nodes
556     size_t m = my_end.my_index-my_begin.my_index;
557     if( m > my_grainsize ) {
558         m = my_begin.my_index + m/2u;
559         auto b = my_begin.my_map->get_bucket(m);
560         my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list.load(std::memory_order_relaxed));
561     } else {
562         my_midpoint = my_end;
563     }
564     __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
565         "my_begin is after my_midpoint" );
566     __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
567         "my_midpoint is after my_end" );
568     __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
569         "[my_begin, my_midpoint) range should not be empty" );
570 }
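// For a range over a table whose mask is 1023 the iterators span bucket indexes
// [0, 1024). With the default grainsize of 1 the splitting constructor above first
// produces [0, 512) and [512, 1024); each half recomputes its own midpoint, and
// is_divisible() keeps returning true until a subrange covers no more buckets than the
// grainsize. This is the recursion that parallel_for and friends drive when given
// concurrent_hash_map::range().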
571 
572 template <typename Key, typename T,
573           typename HashCompare = d1::tbb_hash_compare<Key>,
574           typename Allocator = tbb_allocator<std::pair<const Key, T>>
575 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
576         , typename MutexType = spin_rw_mutex
577          >
578     __TBB_requires(tbb::detail::hash_compare<HashCompare, Key> &&
579                    ch_map_rw_scoped_lockable<MutexType>)
580 #else
581          >
582     __TBB_requires(tbb::detail::hash_compare<HashCompare, Key>)
583 #endif
584 class concurrent_hash_map
585 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
586     : protected hash_map_base<Allocator, MutexType>
587 #else
588     : protected hash_map_base<Allocator, spin_rw_mutex>
589 #endif
590 {
591     template <typename Container, typename Value>
592     friend class hash_map_iterator;
593 
594     template <typename I>
595     friend class hash_map_range;
596     using allocator_traits_type = tbb::detail::allocator_traits<Allocator>;
597 
598 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
599     using base_type = hash_map_base<Allocator, MutexType>;
600 #else
601     using base_type = hash_map_base<Allocator, spin_rw_mutex>;
602 #endif
603 public:
604     using key_type = Key;
605     using mapped_type = T;
606     // type_identity is needed to disable implicit deduction guides for std::initializer_list constructors
607     // and copy/move constructor with explicit allocator argument
608     using allocator_type = tbb::detail::type_identity_t<Allocator>;
609     using hash_compare_type = tbb::detail::type_identity_t<HashCompare>;
610     using value_type = std::pair<const Key, T>;
611     using size_type = typename base_type::size_type;
612     using difference_type = std::ptrdiff_t;
613 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
614     using mutex_type = MutexType;
615 #endif
616     using pointer = typename allocator_traits_type::pointer;
617     using const_pointer = typename allocator_traits_type::const_pointer;
618 
619     using reference = value_type&;
620     using const_reference = const value_type&;
621     using iterator = hash_map_iterator<concurrent_hash_map, value_type>;
622     using const_iterator = hash_map_iterator<concurrent_hash_map, const value_type>;
623     using range_type = hash_map_range<iterator>;
624     using const_range_type = hash_map_range<const_iterator>;
625 
626 protected:
627     static_assert(std::is_same<value_type, typename Allocator::value_type>::value,
628         "value_type of the container must be the same as its allocator's");
629 
630     friend class const_accessor;
631     class node;
632     using segment_index_type = typename base_type::segment_index_type;
633     using segment_ptr_type = typename base_type::segment_ptr_type;
634     using node_base = typename base_type::node_base;
635     using bucket = typename base_type::bucket;
636     using hashcode_type = typename base_type::hashcode_type;
637     using bucket_allocator_type = typename base_type::bucket_allocator_type;
638     using node_allocator_type = typename base_type::allocator_traits_type::template rebind_alloc<node>;
639     using node_allocator_traits = tbb::detail::allocator_traits<node_allocator_type>;
640     hash_compare_type my_hash_compare;
641 
642     class node : public node_base {
643     public:
644         node() {}
645         ~node() {}
646         pointer storage() { return &my_value; }
647         value_type& value() { return *storage(); }
648     private:
649         union {
650             value_type my_value;
651         };
652     };
653 
654     void delete_node( node_base *n ) {
655         node_allocator_type node_allocator(this->get_allocator());
656         node_allocator_traits::destroy(node_allocator, static_cast<node*>(n)->storage());
657         node_allocator_traits::destroy(node_allocator, static_cast<node*>(n));
658         node_allocator_traits::deallocate(node_allocator, static_cast<node*>(n), 1);
659     }
660 
661     template <typename... Args>
662     static node* create_node(bucket_allocator_type& allocator, Args&&... args) {
663         node_allocator_type node_allocator(allocator);
664         node* node_ptr = node_allocator_traits::allocate(node_allocator, 1);
665         auto guard = make_raii_guard([&] {
666             node_allocator_traits::destroy(node_allocator, node_ptr);
667             node_allocator_traits::deallocate(node_allocator, node_ptr, 1);
668         });
669 
670         node_allocator_traits::construct(node_allocator, node_ptr);
671         node_allocator_traits::construct(node_allocator, node_ptr->storage(), std::forward<Args>(args)...);
672         guard.dismiss();
673         return node_ptr;
674     }
675 
676     static node* allocate_node_copy_construct(bucket_allocator_type& allocator, const Key &key, const T * t){
677         return create_node(allocator, key, *t);
678     }
679 
680     static node* allocate_node_move_construct(bucket_allocator_type& allocator, const Key &key, const T * t){
681         return create_node(allocator, key, std::move(*const_cast<T*>(t)));
682     }
683 
684     template <typename K = Key>
685     static node* allocate_node_default_construct(bucket_allocator_type& allocator, const K &key, const T * ){
686         // Emplace construct an empty T object inside the pair
687         return create_node(allocator, std::piecewise_construct,
688                            std::forward_as_tuple(key), std::forward_as_tuple());
689     }
690 
691     static node* do_not_allocate_node(bucket_allocator_type& , const Key &, const T * ){
692         __TBB_ASSERT(false,"this dummy function should not be called");
693         return nullptr;
694     }
695 
696     template <typename K>
697     node *search_bucket( const K &key, bucket *b ) const {
698         node *n = static_cast<node*>( b->node_list.load(std::memory_order_relaxed) );
699         while (this->is_valid(n) && !my_hash_compare.equal(key, n->value().first))
700             n = static_cast<node*>( n->next );
701         __TBB_ASSERT(!rehash_required(n), "Search can be executed only for rehashed bucket");
702         return n;
703     }
704 
705     // bucket_accessor finds a bucket by masked hashcode, rehashes it if required, acquires its lock, and gives access to it
706     class bucket_accessor : public bucket::scoped_type {
707         bucket *my_b;
708     public:
709         bucket_accessor( concurrent_hash_map *base, const hashcode_type h, bool writer = false ) { acquire( base, h, writer ); }
710         // find a bucket by masked hashcode, optionally rehash, and acquire the lock
711         inline void acquire( concurrent_hash_map *base, const hashcode_type h, bool writer = false ) {
712             my_b = base->get_bucket( h );
713             // TODO: actually, notification is unnecessary here, just hiding double-check
714             if (rehash_required(my_b->node_list.load(std::memory_order_acquire))
715                 && bucket::scoped_type::try_acquire( my_b->mutex, /*write=*/true ) )
716             {
717                 if (rehash_required(my_b->node_list.load(std::memory_order_relaxed))) base->rehash_bucket(my_b, h); // recursive rehashing
718             }
719             else bucket::scoped_type::acquire( my_b->mutex, writer );
720             __TBB_ASSERT(!rehash_required(my_b->node_list.load(std::memory_order_relaxed)), nullptr);
721         }
722 
723         // get bucket pointer
724         bucket *operator() () { return my_b; }
725     };
726 
727     // TODO refactor to hash_base
728     void rehash_bucket( bucket *b_new, const hashcode_type hash ) {
729         __TBB_ASSERT( hash > 1, "The lowermost buckets can't be rehashed" );
730         b_new->node_list.store(reinterpret_cast<node_base*>(empty_rehashed_flag), std::memory_order_release); // mark rehashed
731         hashcode_type mask = (hashcode_type(1) << tbb::detail::log2(hash)) - 1; // get parent mask from the topmost bit
732         bucket_accessor b_old( this, hash & mask );
733 
734         mask = (mask<<1) | 1; // get full mask for new bucket
735         __TBB_ASSERT( (mask&(mask+1))==0 && (hash & mask) == hash, nullptr );
736     restart:
737         node_base* prev = nullptr;
738         node_base* curr = b_old()->node_list.load(std::memory_order_acquire);
739         while (this->is_valid(curr)) {
740             hashcode_type curr_node_hash = my_hash_compare.hash(static_cast<node*>(curr)->value().first);
741 
742             if ((curr_node_hash & mask) == hash) {
743                 if (!b_old.is_writer()) {
744                     if (!b_old.upgrade_to_writer()) {
745                         goto restart; // node ptr can be invalid due to concurrent erase
746                     }
747                 }
748                 node_base* next = curr->next;
749                 // exclude from b_old
750                 if (prev == nullptr) {
751                     b_old()->node_list.store(curr->next, std::memory_order_relaxed);
752                 } else {
753                     prev->next = curr->next;
754                 }
755                 this->add_to_bucket(b_new, curr);
756                 curr = next;
757             } else {
758                 prev = curr;
759                 curr = curr->next;
760             }
761         }
762     }
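// Example of the parent-bucket arithmetic used above: if the bucket for masked
// hashcode 13 (binary 1101) is marked rehash_required, its parent is found by dropping
// the topmost bit: mask == (1 << log2(13)) - 1 == 7, so the parent bucket is
// 13 & 7 == 5. Should bucket 5 itself still require rehashing, bucket_accessor's
// acquire() first rehashes it from its own parent (5 & 3 == 1), and the chain always
// terminates at the embedded buckets, which are never marked rehash_required.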
763 
764     template <typename U>
765     using hash_compare_is_transparent = dependent_bool<comp_is_transparent<hash_compare_type>, U>;
766 
767 public:
768 
769     class accessor;
770     // Combines data access, locking, and garbage collection.
771     class const_accessor : private node::scoped_type /*which derived from no_copy*/ {
772 #if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
773         friend class concurrent_hash_map<Key,T,HashCompare,Allocator,MutexType>;
774 #else
775         friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
776 #endif
777         friend class accessor;
778     public:
779         // Type of value
780         using value_type = const typename concurrent_hash_map::value_type;
781 
782         // True if result is empty.
783         bool empty() const { return !my_node; }
784 
785         // Set to null
786         void release() {
787             if( my_node ) {
788                 node::scoped_type::release();
789                 my_node = nullptr;
790             }
791         }
792 
793         // Return reference to associated value in hash table.
794         const_reference operator*() const {
795             __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
796             return my_node->value();
797         }
798 
799         // Return pointer to associated value in hash table.
800         const_pointer operator->() const {
801             return &operator*();
802         }
803 
804         // Create empty result
805         const_accessor() : my_node(nullptr), my_hash() {}
806 
807         // Destroy result after releasing the underlying reference.
808         ~const_accessor() {
809             my_node = nullptr; // scoped lock's release() is called in its destructor
810         }
811     protected:
812         bool is_writer() { return node::scoped_type::is_writer(); }
813         node *my_node;
814         hashcode_type my_hash;
815     };
816 
817     // Allows write access to elements and combines data access, locking, and garbage collection.
818     class accessor: public const_accessor {
819     public:
820         // Type of value
821         using value_type = typename concurrent_hash_map::value_type;
822 
823         // Return reference to associated value in hash table.
824         reference operator*() const {
825             __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
826             return this->my_node->value();
827         }
828 
829         // Return pointer to associated value in hash table.
830         pointer operator->() const {
831             return &operator*();
832         }
833     };
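// Typical accessor usage (an illustrative sketch, assuming a shared table declared
// elsewhere as tbb::concurrent_hash_map<std::string, int> table;):
//
//   {
//       tbb::concurrent_hash_map<std::string, int>::accessor a;   // write access
//       table.insert(a, "apples");   // inserts a default-constructed value if absent
//       a->second += 1;              // safe: the element stays locked through `a`
//   }                                // lock released when `a` is destroyed
//
//   {
//       tbb::concurrent_hash_map<std::string, int>::const_accessor ca;  // read access
//       if (table.find(ca, "apples"))
//           int copies = ca->second; // other readers may hold the lock concurrently
//   }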
834 
835     explicit concurrent_hash_map( const hash_compare_type& compare, const allocator_type& a = allocator_type() )
836         : base_type(a)
837         , my_hash_compare(compare)
838     {}
839 
840     concurrent_hash_map() : concurrent_hash_map(hash_compare_type()) {}
841 
842     explicit concurrent_hash_map( const allocator_type& a )
843         : concurrent_hash_map(hash_compare_type(), a)
844     {}
845 
846     // Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
847     concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() )
848         : concurrent_hash_map(a)
849     {
850         this->reserve(n);
851     }
852 
853     concurrent_hash_map( size_type n, const hash_compare_type& compare, const allocator_type& a = allocator_type() )
854         : concurrent_hash_map(compare, a)
855     {
856         this->reserve(n);
857     }
858 
859     // Copy constructor
860     concurrent_hash_map( const concurrent_hash_map &table )
861         : concurrent_hash_map(node_allocator_traits::select_on_container_copy_construction(table.get_allocator()))
862     {
863         try_call( [&] {
864             internal_copy(table);
865         }).on_exception( [&] {
866             this->clear();
867         });
868     }
869 
870     concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a)
871         : concurrent_hash_map(a)
872     {
873         try_call( [&] {
874             internal_copy(table);
875         }).on_exception( [&] {
876             this->clear();
877         });
878     }
879 
880     // Move constructor
881     concurrent_hash_map( concurrent_hash_map &&table )
882         : concurrent_hash_map(std::move(table.get_allocator()))
883     {
884         this->internal_move(std::move(table));
885     }
886 
887     // Move constructor
888     concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a )
889         : concurrent_hash_map(a)
890     {
891         using is_equal_type = typename node_allocator_traits::is_always_equal;
892         internal_move_construct_with_allocator(std::move(table), a, is_equal_type());
893     }
894 
895     // Construction with copying iteration range and given allocator instance
896     template <typename I>
897     concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() )
898         : concurrent_hash_map(a)
899     {
900         try_call( [&] {
901             internal_copy(first, last, std::distance(first, last));
902         }).on_exception( [&] {
903             this->clear();
904         });
905     }
906 
907     template <typename I>
908     concurrent_hash_map( I first, I last, const hash_compare_type& compare, const allocator_type& a = allocator_type() )
909         : concurrent_hash_map(compare, a)
910     {
911         try_call( [&] {
912             internal_copy(first, last, std::distance(first, last));
913         }).on_exception( [&] {
914             this->clear();
915         });
916     }
917 
918     concurrent_hash_map( std::initializer_list<value_type> il, const hash_compare_type& compare = hash_compare_type(), const allocator_type& a = allocator_type() )
919         : concurrent_hash_map(compare, a)
920     {
921         try_call( [&] {
922             internal_copy(il.begin(), il.end(), il.size());
923         }).on_exception( [&] {
924             this->clear();
925         });
926     }
927 
928     concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type& a )
929         : concurrent_hash_map(il, hash_compare_type(), a) {}
930 
931     // Assignment
932     concurrent_hash_map& operator=( const concurrent_hash_map &table ) {
933         if( this != &table ) {
934             clear();
935             copy_assign_allocators(this->my_allocator, table.my_allocator);
936             internal_copy(table);
937         }
938         return *this;
939     }
940 
941     // Move Assignment
942     concurrent_hash_map& operator=( concurrent_hash_map &&table ) {
943         if( this != &table ) {
944             using pocma_type = typename node_allocator_traits::propagate_on_container_move_assignment;
945             using is_equal_type = typename node_allocator_traits::is_always_equal;
946             move_assign_allocators(this->my_allocator, table.my_allocator);
947             internal_move_assign(std::move(table), tbb::detail::disjunction<is_equal_type, pocma_type>());
948         }
949         return *this;
950     }
951 
952     // Assignment
953     concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {
954         clear();
955         internal_copy(il.begin(), il.end(), il.size());
956         return *this;
957     }
958 
959     // Rehashes and optionally resizes the whole table.
960     /** Useful to optimize performance before or after concurrent operations.
961         Also enables using of find() and count() concurrent methods in serial context. */
962     void rehash(size_type sz = 0) {
963         this->reserve(sz); // TODO: add reduction of number of buckets as well
964         hashcode_type mask = this->my_mask.load(std::memory_order_relaxed);
965         hashcode_type b = (mask+1)>>1; // size or first index of the last segment
966         __TBB_ASSERT((b&(b-1))==0, nullptr); // zero or power of 2
967         bucket *bp = this->get_bucket( b ); // only the last segment should be scanned for rehashing
968         for(; b <= mask; b++, bp++ ) {
969             node_base *n = bp->node_list.load(std::memory_order_relaxed);
970             __TBB_ASSERT( this->is_valid(n) || empty_rehashed(n) || rehash_required(n), "Broken internal structure" );
971             __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
972             if (rehash_required(n)) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
973                 hashcode_type h = b; bucket *b_old = bp;
974                 do {
975                     __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
976                     hashcode_type m = ( hashcode_type(1) << tbb::detail::log2( h ) ) - 1; // get parent mask from the topmost bit
977                     b_old = this->get_bucket( h &= m );
978                 } while( rehash_required(b_old->node_list.load(std::memory_order_relaxed)) );
979                 // now h is the index of the root rehashed bucket b_old
980                 this->mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
981                 node_base* prev = nullptr;
982                 node_base* curr = b_old->node_list.load(std::memory_order_relaxed);
983                 while (this->is_valid(curr)) {
984                     hashcode_type curr_node_hash = my_hash_compare.hash(static_cast<node*>(curr)->value().first);
985 
986                     if ((curr_node_hash & mask) != h) { // should be rehashed
987                         node_base* next = curr->next;
988                         // exclude from b_old
989                         if (prev == nullptr) {
990                             b_old->node_list.store(curr->next, std::memory_order_relaxed);
991                         } else {
992                             prev->next = curr->next;
993                         }
994                         bucket *b_new = this->get_bucket(curr_node_hash & mask);
995                         __TBB_ASSERT(!rehash_required(b_new->node_list.load(std::memory_order_relaxed)), "hash() function changed for key in table or internal error");
996                         this->add_to_bucket(b_new, curr);
997                         curr = next;
998                     } else {
999                         prev = curr;
1000                         curr = curr->next;
1001                     }
1002                 }
1003             }
1004         }
1005     }
1006 
1007     // Clear table
1008     void clear() {
1009         hashcode_type m = this->my_mask.load(std::memory_order_relaxed);
1010         __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1011         this->my_size.store(0, std::memory_order_relaxed);
1012         segment_index_type s = this->segment_index_of( m );
1013         __TBB_ASSERT( s+1 == this->pointers_per_table || !this->my_table[s+1].load(std::memory_order_relaxed), "wrong mask or concurrent grow" );
1014         do {
1015             __TBB_ASSERT(this->is_valid(this->my_table[s].load(std::memory_order_relaxed)), "wrong mask or concurrent grow" );
1016             segment_ptr_type buckets_ptr = this->my_table[s].load(std::memory_order_relaxed);
1017             size_type sz = this->segment_size( s ? s : 1 );
1018             for( segment_index_type i = 0; i < sz; i++ )
1019                 for( node_base *n = buckets_ptr[i].node_list.load(std::memory_order_relaxed);
1020                     this->is_valid(n); n = buckets_ptr[i].node_list.load(std::memory_order_relaxed) )
1021                 {
1022                     buckets_ptr[i].node_list.store(n->next, std::memory_order_relaxed);
1023                     delete_node( n );
1024                 }
1025             this->delete_segment(s);
1026         } while(s-- > 0);
1027         this->my_mask.store(this->embedded_buckets - 1, std::memory_order_relaxed);
1028     }
1029 
1030     // Clear table and destroy it.
1031     ~concurrent_hash_map() { clear(); }
1032 
1033     //------------------------------------------------------------------------
1034     // Parallel algorithm support
1035     //------------------------------------------------------------------------
1036     range_type range( size_type grainsize=1 ) {
1037         return range_type( *this, grainsize );
1038     }
1039     const_range_type range( size_type grainsize=1 ) const {
1040         return const_range_type( *this, grainsize );
1041     }
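// These ranges plug into the parallel algorithms; a minimal sketch (assuming
// parallel_for.h is included and no other thread modifies `table` during the call):
//
//   using map_type = tbb::concurrent_hash_map<std::string, int>;
//   map_type table;
//   // ... fill table ...
//   tbb::parallel_for(table.range(), [](const map_type::range_type& r) {
//       for (map_type::range_type::iterator i = r.begin(); i != r.end(); ++i)
//           ++i->second;   // per-element work; buckets are partitioned between tasks
//   });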
1042 
1043     //------------------------------------------------------------------------
1044     // STL support - not thread-safe methods
1045     //------------------------------------------------------------------------
1046     iterator begin() { return iterator( *this, 0, this->my_embedded_segment, this->my_embedded_segment->node_list.load(std::memory_order_relaxed) ); }
1047     const_iterator begin() const { return const_iterator( *this, 0, this->my_embedded_segment, this->my_embedded_segment->node_list.load(std::memory_order_relaxed) ); }
1048     const_iterator cbegin() const { return const_iterator( *this, 0, this->my_embedded_segment, this->my_embedded_segment->node_list.load(std::memory_order_relaxed) ); }
1049     iterator end() { return iterator( *this, 0, nullptr, nullptr ); }
1050     const_iterator end() const { return const_iterator( *this, 0, nullptr, nullptr ); }
1051     const_iterator cend() const { return const_iterator( *this, 0, nullptr, nullptr ); }
1052     std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
1053     std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }
1054 
1055     template <typename K>
1056     typename std::enable_if<hash_compare_is_transparent<K>::value,
1057                             std::pair<iterator, iterator>>::type equal_range( const K& key ) {
1058         return internal_equal_range(key, end());
1059     }
1060 
1061     template <typename K>
1062     typename std::enable_if<hash_compare_is_transparent<K>::value,
1063                             std::pair<const_iterator, const_iterator>>::type equal_range( const K& key ) const {
1064         return internal_equal_range(key, end());
1065     }
1066 
1067     // Number of items in table.
1068     size_type size() const { return this->my_size.load(std::memory_order_acquire); }
1069 
1070     // True if size()==0.
1071     __TBB_nodiscard bool empty() const { return size() == 0; }
1072 
1073     // Upper bound on size.
1074     size_type max_size() const {
1075         return allocator_traits_type::max_size(base_type::get_allocator());
1076     }
1077 
1078     // Returns the current number of buckets
1079     size_type bucket_count() const { return this->my_mask.load(std::memory_order_relaxed) + 1; }
1080 
1081     // return allocator object
1082     allocator_type get_allocator() const { return base_type::get_allocator(); }
1083 
1084     // swap two instances. Iterators are invalidated
1085     void swap(concurrent_hash_map& table) {
1086         using pocs_type = typename node_allocator_traits::propagate_on_container_swap;
1087         using is_equal_type = typename node_allocator_traits::is_always_equal;
1088         swap_allocators(this->my_allocator, table.my_allocator);
1089         internal_swap(table, tbb::detail::disjunction<pocs_type, is_equal_type>());
1090     }
1091 
1092     //------------------------------------------------------------------------
1093     // concurrent map operations
1094     //------------------------------------------------------------------------
1095 
1096     // Return count of items (0 or 1)
1097     size_type count( const Key &key ) const {
1098         return const_cast<concurrent_hash_map*>(this)->lookup</*insert*/false>(key, nullptr, nullptr, /*write=*/false, &do_not_allocate_node);
1099     }
1100 
1101     template <typename K>
1102     typename std::enable_if<hash_compare_is_transparent<K>::value,
1103                             size_type>::type count( const K& key ) const {
1104         return const_cast<concurrent_hash_map*>(this)->lookup</*insert*/false>(key, nullptr, nullptr, /*write=*/false, &do_not_allocate_node);
1105     }
1106 
1107     // Find item and acquire a read lock on the item.
1108     /** Return true if item is found, false otherwise. */
1109     bool find( const_accessor &result, const Key &key ) const {
1110         result.release();
1111         return const_cast<concurrent_hash_map*>(this)->lookup</*insert*/false>(key, nullptr, &result, /*write=*/false, &do_not_allocate_node );
1112     }
1113 
1114     // Find item and acquire a write lock on the item.
1115     /** Return true if item is found, false otherwise. */
1116     bool find( accessor &result, const Key &key ) {
1117         result.release();
1118         return lookup</*insert*/false>(key, nullptr, &result, /*write=*/true, &do_not_allocate_node);
1119     }
1120 
1121     template <typename K>
1122     typename std::enable_if<hash_compare_is_transparent<K>::value,
1123                             bool>::type find( const_accessor& result, const K& key ) {
1124         result.release();
1125         return lookup</*insert*/false>(key, nullptr, &result, /*write=*/false, &do_not_allocate_node);
1126     }
1127 
1128     template <typename K>
1129     typename std::enable_if<hash_compare_is_transparent<K>::value,
1130                             bool>::type find( accessor& result, const K& key ) {
1131         result.release();
1132         return lookup</*insert*/false>(key, nullptr, &result, /*write=*/true, &do_not_allocate_node);
1133     }
1134 
1135     // Insert item (if not already present) and acquire a read lock on the item.
1136     /** Returns true if item is new. */
1137     bool insert( const_accessor &result, const Key &key ) {
1138         result.release();
1139         return lookup</*insert*/true>(key, nullptr, &result, /*write=*/false, &allocate_node_default_construct<>);
1140     }
1141 
1142     // Insert item (if not already present) and acquire a write lock on the item.
1143     /** Returns true if item is new. */
1144     bool insert( accessor &result, const Key &key ) {
1145         result.release();
1146         return lookup</*insert*/true>(key, nullptr, &result, /*write=*/true, &allocate_node_default_construct<>);
1147     }
1148 
1149     template <typename K>
1150     typename std::enable_if<hash_compare_is_transparent<K>::value &&
1151                             std::is_constructible<key_type, const K&>::value,
1152                             bool>::type insert( const_accessor& result, const K& key ) {
1153         result.release();
1154         return lookup</*insert*/true>(key, nullptr, &result, /*write=*/false, &allocate_node_default_construct<K>);
1155     }
1156 
1157     template <typename K>
1158     typename std::enable_if<hash_compare_is_transparent<K>::value &&
1159                             std::is_constructible<key_type, const K&>::value,
1160                             bool>::type insert( accessor& result, const K& key ) {
1161         result.release();
1162         return lookup</*insert*/true>(key, nullptr, &result, /*write=*/true, &allocate_node_default_construct<K>);
1163     }
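// The K-templated overloads above (and the matching find/count/erase overloads) take
// part in overload resolution only when hash_compare_type advertises heterogeneous
// (transparent) key support; see hash_compare_is_transparent. A sketch, assuming a
// user-written comparator string_hash_compare whose hash()/equal() accept both
// std::string and std::string_view:
//
//   tbb::concurrent_hash_map<std::string, int, string_hash_compare> table;
//   decltype(table)::accessor a;
//   table.insert(a, std::string_view("apples"));  // looked up without materializing a
//                                                 // std::string; the key is constructed
//                                                 // only if the insertion really happens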
1164 
1165     // Insert item by copying if there is no such key present already and acquire a read lock on the item.
1166     /** Returns true if item is new. */
1167     bool insert( const_accessor &result, const value_type &value ) {
1168         result.release();
1169         return lookup</*insert*/true>(value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct);
1170     }
1171 
1172     // Insert item by copying if there is no such key present already and acquire a write lock on the item.
1173     /** Returns true if item is new. */
1174     bool insert( accessor &result, const value_type &value ) {
1175         result.release();
1176         return lookup</*insert*/true>(value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct);
1177     }
1178 
1179     // Insert item by copying if there is no such key present already
1180     /** Returns true if item is inserted. */
1181     bool insert( const value_type &value ) {
1182         return lookup</*insert*/true>(value.first, &value.second, nullptr, /*write=*/false, &allocate_node_copy_construct);
1183     }
1184 
1185     // Insert item by moving if there is no such key present already and acquire a read lock on the item.
1186     /** Returns true if item is new. */
1187     bool insert( const_accessor &result, value_type && value ) {
1188         return generic_move_insert(result, std::move(value));
1189     }
1190 
1191     // Insert item by moving if there is no such key present already and acquire a write lock on the item.
1192     /** Returns true if item is new. */
1193     bool insert( accessor &result, value_type && value ) {
1194         return generic_move_insert(result, std::move(value));
1195     }
1196 
1197     // Insert item by moving if there is no such key present already
1198     /** Returns true if item is inserted. */
1199     bool insert( value_type && value ) {
1200         return generic_move_insert(accessor_not_used(), std::move(value));
1201     }
1202 
1203     // Insert item by constructing it in place from the given arguments if there is no such key present already and acquire a read lock on the item.
1204     /** Returns true if item is new. */
1205     template <typename... Args>
1206     bool emplace( const_accessor &result, Args&&... args ) {
1207         return generic_emplace(result, std::forward<Args>(args)...);
1208     }
1209 
1210     // Insert item by constructing it in place from the given arguments if there is no such key present already and acquire a write lock on the item.
1211     /** Returns true if item is new. */
1212     template <typename... Args>
1213     bool emplace( accessor &result, Args&&... args ) {
1214         return generic_emplace(result, std::forward<Args>(args)...);
1215     }
1216 
1217     // Insert item by constructing it in place from the given arguments if there is no such key present already
1218     /** Returns true if item is inserted. */
1219     template <typename... Args>
1220     bool emplace( Args&&... args ) {
1221         return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
1222     }
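
    // Illustrative usage sketch (editorial): emplace() forwards its arguments to the
    // value_type constructor; with a const_accessor the element stays read-locked.
    //
    //     tbb::concurrent_hash_map<int, std::string> table;
    //     tbb::concurrent_hash_map<int, std::string>::const_accessor cacc;
    //     if (table.emplace(cacc, 7, "seven")) {
    //         // newly constructed element, readable through cacc->first / cacc->second
    //     }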
1223 
1224     // Insert range [first, last)
1225     template <typename I>
1226     void insert( I first, I last ) {
1227         for ( ; first != last; ++first )
1228             insert( *first );
1229     }
1230 
1231     // Insert initializer list
1232     void insert( std::initializer_list<value_type> il ) {
1233         insert( il.begin(), il.end() );
1234     }
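
    // Illustrative usage sketch (editorial): the range and initializer-list overloads
    // forward to the single-element insert(), so duplicate keys in the input are skipped.
    //
    //     std::vector<std::pair<int, int>> src{{1, 10}, {2, 20}, {1, 11}};
    //     tbb::concurrent_hash_map<int, int> table;
    //     table.insert(src.begin(), src.end());   // inserts {1,10} and {2,20}; {1,11} is ignored
    //     table.insert({{3, 30}, {4, 40}});       // initializer-list overload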
1235 
1236     // Erase item.
1237     /** Returns true if the item was erased by this particular call. */
1238     bool erase( const Key &key ) {
1239         return internal_erase(key);
1240     }
1241 
1242     template <typename K>
1243     typename std::enable_if<hash_compare_is_transparent<K>::value,
1244                             bool>::type erase( const K& key ) {
1245         return internal_erase(key);
1246     }
1247 
1248     // Erase item by const_accessor.
1249     /** Returns true if the item was erased by this particular call. */
1250     bool erase( const_accessor& item_accessor ) {
1251         return exclude( item_accessor );
1252     }
1253 
1254     // Erase item by accessor.
1255     /** Returns true if the item was erased by this particular call. */
1256     bool erase( accessor& item_accessor ) {
1257         return exclude( item_accessor );
1258     }
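
    // Illustrative usage sketch (editorial): erase() by key returns whether this call
    // removed the element; erasing through an accessor releases that accessor.
    //
    //     tbb::concurrent_hash_map<int, int> table;
    //     table.insert(std::make_pair(5, 50));
    //     bool removed = table.erase(5);          // true: this call erased the item
    //     removed = table.erase(5);               // false: nothing left to erase
    //
    //     table.insert(std::make_pair(6, 60));
    //     tbb::concurrent_hash_map<int, int>::accessor acc;
    //     if (table.find(acc, 6))
    //         table.erase(acc);                   // the accessor is released by erase()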
1259 
1260 protected:
1261     template <typename K, typename AllocateNodeType>
1262     node* allocate_node_helper( const K& key, const T* t, AllocateNodeType allocate_node, std::true_type ) {
1263         return allocate_node(base_type::get_allocator(), key, t);
1264     }
1265 
1266     template <typename K, typename AllocateNodeType>
1267     node* allocate_node_helper( const K&, const T*, AllocateNodeType, std::false_type ) {
1268         __TBB_ASSERT(false, "allocate_node_helper with std::false_type should never be called");
1269         return nullptr;
1270     }
1271 
1272     // Insert or find item and optionally acquire a lock on the item.
1273     template <bool OpInsert, typename K, typename AllocateNodeType>
1274     bool lookup( const K &key, const T *t, const_accessor *result, bool write, AllocateNodeType allocate_node, node *tmp_n  = nullptr)
1275     {
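        // Editorial summary of the protocol below: hash the key, take a read lock on
        // the matching bucket and search its chain. When OpInsert is true and the key
        // is missing, the bucket lock is upgraded to a writer and a pre-allocated node
        // is linked in; the mask is re-checked and the whole operation restarts if it
        // raced with concurrent growth/rehashing. If an accessor was supplied, the
        // per-item mutex is then acquired with backoff, and finally a new segment may
        // be enabled if the insertion triggered growth.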
1276         __TBB_ASSERT( !result || !result->my_node, nullptr );
1277         bool return_value;
1278         hashcode_type const h = my_hash_compare.hash( key );
1279         hashcode_type m = this->my_mask.load(std::memory_order_acquire);
1280         segment_index_type grow_segment = 0;
1281         node *n;
1282         restart:
1283         {//lock scope
1284             __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1285             return_value = false;
1286             // get bucket
1287             bucket_accessor b( this, h & m );
1288             // find a node
1289             n = search_bucket( key, b() );
1290             if( OpInsert ) {
1291                 // [opt] insert a key
1292                 if( !n ) {
1293                     if( !tmp_n ) {
1294                         tmp_n = allocate_node_helper(key, t, allocate_node, std::integral_constant<bool, OpInsert>{});
1295                     }
1296                     while ( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improve insertion
1297                         // Re-run the search in case another thread inserted the item during the upgrade
1298                         n = search_bucket(key, b());
1299                         if (this->is_valid(n)) { // unfortunately, it did
1300                             if (!b.downgrade_to_reader()) {
1301                                 // The lock was released and reacquired during the downgrade,
1302                                 // so re-run the search in case another thread removed the item in the meantime
1303                                 n = search_bucket(key, b());
1304                                 if (!this->is_valid(n)) {
1305                                     // Unfortunately, it did
1306                                     // We need to try upgrading to writer again
1307                                     continue;
1308                                 }
1309                             }
1310                             goto exists;
1311                         }
1312                     }
1313 
1314                     if( this->check_mask_race(h, m) )
1315                         goto restart; // b.release() is done in ~b().
1316                     // insert and set flag to grow the container
1317                     grow_segment = this->insert_new_node( b(), n = tmp_n, m );
1318                     tmp_n = nullptr;
1319                     return_value = true;
1320                 }
1321             } else { // find or count
1322                 if( !n ) {
1323                     if( this->check_mask_race( h, m ) )
1324                         goto restart; // b.release() is done in ~b(). TODO: replace by continue
1325                     return false;
1326                 }
1327                 return_value = true;
1328             }
1329         exists:
1330             if( !result ) goto check_growth;
1331             // TODO: the following looks like a generic/regular operation
1332             // acquire the item
1333             if( !result->try_acquire( n->mutex, write ) ) {
1334                 for( tbb::detail::atomic_backoff backoff(true);; ) {
1335                     if( result->try_acquire( n->mutex, write ) ) break;
1336                     if( !backoff.bounded_pause() ) {
1337                         // the wait is taking too long; restart the operation
1338                         b.release();
1339                         __TBB_ASSERT( !OpInsert || !return_value, "Can't acquire new item in locked bucket?" );
1340                         yield();
1341                         m = this->my_mask.load(std::memory_order_acquire);
1342                         goto restart;
1343                     }
1344                 }
1345             }
1346         }//lock scope
1347         result->my_node = n;
1348         result->my_hash = h;
1349     check_growth:
1350         // [opt] grow the container
1351         if( grow_segment ) {
1352             this->enable_segment( grow_segment );
1353         }
1354         if( tmp_n ) // can be non-null only in the OpInsert case
1355             delete_node( tmp_n );
1356         return return_value;
1357     }
1358 
1359     struct accessor_not_used { void release(){}};
1360     friend const_accessor* accessor_location( accessor_not_used const& ){ return nullptr;}
1361     friend const_accessor* accessor_location( const_accessor & a )      { return &a;}
1362 
1363     friend bool is_write_access_needed( accessor const& )           { return true;}
1364     friend bool is_write_access_needed( const_accessor const& )     { return false;}
1365     friend bool is_write_access_needed( accessor_not_used const& )  { return false;}
1366 
1367     template <typename Accessor>
1368     bool generic_move_insert( Accessor && result, value_type && value ) {
1369         result.release();
1370         return lookup</*insert*/true>(value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct);
1371     }
1372 
1373     template <typename Accessor, typename... Args>
1374     bool generic_emplace( Accessor && result, Args &&... args ) {
1375         result.release();
1376         node * node_ptr = create_node(base_type::get_allocator(), std::forward<Args>(args)...);
1377         return lookup</*insert*/true>(node_ptr->value().first, nullptr, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr);
1378     }
1379 
1380     // delete item by accessor
1381     bool exclude( const_accessor &item_accessor ) {
1382         __TBB_ASSERT( item_accessor.my_node, nullptr );
1383         node_base *const exclude_node = item_accessor.my_node;
1384         hashcode_type const hash = item_accessor.my_hash;
1385         hashcode_type mask = this->my_mask.load(std::memory_order_acquire);
1386         do {
1387             // get bucket
1388             bucket_accessor b( this, hash & mask, /*writer=*/true );
1389             node_base* prev = nullptr;
1390             node_base* curr = b()->node_list.load(std::memory_order_relaxed);
1391 
1392             while (curr && curr != exclude_node) {
1393                 prev = curr;
1394                 curr = curr->next;
1395             }
1396 
1397             if (curr == nullptr) { // someone else was first
1398                 if (this->check_mask_race(hash, mask))
1399                     continue;
1400                 item_accessor.release();
1401                 return false;
1402             }
1403             __TBB_ASSERT( curr == exclude_node, nullptr );
1404             // remove from container
1405             if (prev == nullptr) {
1406                 b()->node_list.store(curr->next, std::memory_order_relaxed);
1407             } else {
1408                 prev->next = curr->next;
1409             }
1410 
1411             this->my_size--;
1412             break;
1413         } while(true);
1414         if (!item_accessor.is_writer()) { // need to get exclusive lock
1415             item_accessor.upgrade_to_writer(); // return value means nothing here
1416         }
1417 
1418         item_accessor.release();
1419         delete_node(exclude_node); // Only one thread can delete it
1420         return true;
1421     }
1422 
1423     template <typename K>
1424     bool internal_erase( const K& key ) {
1425         node_base *erase_node;
1426         hashcode_type const hash = my_hash_compare.hash(key);
1427         hashcode_type mask = this->my_mask.load(std::memory_order_acquire);
1428     restart:
1429         {//lock scope
1430             // get bucket
1431             bucket_accessor b( this, hash & mask );
1432         search:
1433             node_base* prev = nullptr;
1434             erase_node = b()->node_list.load(std::memory_order_relaxed);
1435             while (this->is_valid(erase_node) && !my_hash_compare.equal(key, static_cast<node*>(erase_node)->value().first ) ) {
1436                 prev = erase_node;
1437                 erase_node = erase_node->next;
1438             }
1439 
1440             if (erase_node == nullptr) { // not found, but mask could be changed
1441                 if (this->check_mask_race(hash, mask))
1442                     goto restart;
1443                 return false;
1444             } else if (!b.is_writer() && !b.upgrade_to_writer()) {
1445                 if (this->check_mask_race(hash, mask)) // contended upgrade, check mask
1446                     goto restart;
1447                 goto search;
1448             }
1449 
1450             // remove from container
1451             if (prev == nullptr) {
1452                 b()->node_list.store(erase_node->next, std::memory_order_relaxed);
1453             } else {
1454                 prev->next = erase_node->next;
1455             }
1456             this->my_size--;
1457         }
1458         {
1459             typename node::scoped_type item_locker( erase_node->mutex, /*write=*/true );
1460         }
1461         // note: there should be no threads attempting to acquire this mutex again, do not try to upgrade const_accessor!
1462         delete_node(erase_node); // Only one thread can delete it due to write lock on the bucket
1463         return true;
1464     }
1465 
1466     // Returns a pair of iterators [lower, upper) delimiting the item with the given key, or (end_, end_) if no such item exists
1467     template <typename K, typename I>
1468     std::pair<I, I> internal_equal_range( const K& key, I end_ ) const {
1469         hashcode_type h = my_hash_compare.hash( key );
1470         hashcode_type m = this->my_mask.load(std::memory_order_relaxed);
1471         __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1472         h &= m;
1473         bucket *b = this->get_bucket( h );
1474         while (rehash_required(b->node_list.load(std::memory_order_relaxed))) {
1475             m = ( hashcode_type(1) << tbb::detail::log2( h ) ) - 1; // get parent mask from the topmost bit
1476             b = this->get_bucket( h &= m );
1477         }
1478         node *n = search_bucket( key, b );
1479         if( !n )
1480             return std::make_pair(end_, end_);
1481         iterator lower(*this, h, b, n), upper(lower);
1482         return std::make_pair(lower, ++upper);
1483     }
1484 
1485     // Copy "source" to *this, where *this must start out empty.
1486     void internal_copy( const concurrent_hash_map& source ) {
1487         hashcode_type mask = source.my_mask.load(std::memory_order_relaxed);
1488         if( this->my_mask.load(std::memory_order_relaxed) == mask ) { // optimized version
1489             this->reserve(source.my_size.load(std::memory_order_relaxed)); // TODO: load_factor?
1490             bucket *dst = nullptr, *src = nullptr;
1491             bool rehashing_required = false;
1492             for( hashcode_type k = 0; k <= mask; k++ ) {
1493                 if( k & (k-2) ) ++dst, ++src; // not the beginning of a segment
1494                 else { dst = this->get_bucket( k ); src = source.get_bucket( k ); }
1495                 __TBB_ASSERT(!rehash_required(dst->node_list.load(std::memory_order_relaxed)), "Invalid bucket in destination table");
1496                 node *n = static_cast<node*>( src->node_list.load(std::memory_order_relaxed) );
1497                 if (rehash_required(n)) { // source is not rehashed, items are in previous buckets
1498                     rehashing_required = true;
1499                     dst->node_list.store(reinterpret_cast<node_base*>(rehash_req_flag), std::memory_order_relaxed);
1500                 } else for(; n; n = static_cast<node*>( n->next ) ) {
1501                     node* node_ptr = create_node(base_type::get_allocator(), n->value().first, n->value().second);
1502                     this->add_to_bucket( dst, node_ptr);
1503                     this->my_size.fetch_add(1, std::memory_order_relaxed);
1504                 }
1505             }
1506             if( rehashing_required ) rehash();
1507         } else internal_copy(source.begin(), source.end(), source.my_size.load(std::memory_order_relaxed));
1508     }
1509 
1510     template <typename I>
1511     void internal_copy( I first, I last, size_type reserve_size ) {
1512         this->reserve(reserve_size); // TODO: load_factor?
1513         hashcode_type m = this->my_mask.load(std::memory_order_relaxed);
1514         for(; first != last; ++first) {
1515             hashcode_type h = my_hash_compare.hash( (*first).first );
1516             bucket *b = this->get_bucket( h & m );
1517             __TBB_ASSERT(!rehash_required(b->node_list.load(std::memory_order_relaxed)), "Invalid bucket in destination table");
1518             node* node_ptr = create_node(base_type::get_allocator(), (*first).first, (*first).second);
1519             this->add_to_bucket( b, node_ptr );
1520             ++this->my_size; // TODO: replace by non-atomic op
1521         }
1522     }
1523 
1524     void internal_move_construct_with_allocator( concurrent_hash_map&& other, const allocator_type&,
1525                                                 /*is_always_equal=*/std::true_type )
1526     {
1527         this->internal_move(std::move(other));
1528     }
1529 
1530     void internal_move_construct_with_allocator( concurrent_hash_map&& other, const allocator_type& a,
1531                                                 /*is_always_equal=*/std::false_type )
1532     {
1533         if (a == other.get_allocator()){
1534             this->internal_move(std::move(other));
1535         } else {
1536             try_call( [&] {
1537                 internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()),
1538                     other.size());
1539             }).on_exception( [&] {
1540                 this->clear();
1541             });
1542         }
1543     }
1544 
1545     void internal_move_assign( concurrent_hash_map&& other,
1546         /*is_always_equal || POCMA = */std::true_type)
1547     {
1548         this->internal_move(std::move(other));
1549     }
1550 
1551     void internal_move_assign(concurrent_hash_map&& other, /*is_always_equal=*/ std::false_type) {
1552         if (this->my_allocator == other.my_allocator) {
1553             this->internal_move(std::move(other));
1554         } else {
1555             // do per-element move
1556             internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()),
1557                 other.size());
1558         }
1559     }
1560 
1561     void internal_swap(concurrent_hash_map& other, /*is_always_equal || POCS = */ std::true_type) {
1562         this->internal_swap_content(other);
1563     }
1564 
1565     void internal_swap(concurrent_hash_map& other, /*is_always_equal || POCS = */ std::false_type) {
1566         __TBB_ASSERT(this->my_allocator == other.my_allocator, nullptr);
1567         this->internal_swap_content(other);
1568     }
1569 
1570     // Fast find when no concurrent erasure is used. For internal use inside TBB only!
1571     /** Return pointer to item with given key, or nullptr if no such item exists.
1572         Must not be called concurrently with erasure operations. */
1573     const_pointer internal_fast_find( const Key& key ) const {
1574         hashcode_type h = my_hash_compare.hash( key );
1575         hashcode_type m = this->my_mask.load(std::memory_order_acquire);
1576         node *n;
1577     restart:
1578         __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1579         bucket *b = this->get_bucket( h & m );
1580         // TODO: actually, notification is unnecessary here, just hiding double-check
1581         if (rehash_required(b->node_list.load(std::memory_order_acquire)))
1582         {
1583             typename bucket::scoped_type lock;
1584             if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
1585                 if (rehash_required(b->node_list.load(std::memory_order_relaxed)))
1586                     const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); // recursive rehashing
1587             }
1588             else lock.acquire( b->mutex, /*write=*/false );
1589             __TBB_ASSERT(!rehash_required(b->node_list.load(std::memory_order_relaxed)), nullptr);
1590         }
1591         n = search_bucket( key, b );
1592         if( n )
1593             return n->storage();
1594         else if( this->check_mask_race( h, m ) )
1595             goto restart;
1596         return nullptr;
1597     }
1598 };
1599 
1600 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1601 template <typename It,
1602           typename HashCompare = d1::tbb_hash_compare<iterator_key_t<It>>,
1603           typename Alloc = tbb_allocator<iterator_alloc_pair_t<It>>,
1604           typename = std::enable_if_t<is_input_iterator_v<It>>,
1605           typename = std::enable_if_t<is_allocator_v<Alloc>>,
1606           typename = std::enable_if_t<!is_allocator_v<HashCompare>>>
1607 concurrent_hash_map( It, It, HashCompare = HashCompare(), Alloc = Alloc() )
1608 -> concurrent_hash_map<iterator_key_t<It>, iterator_mapped_t<It>, HashCompare, Alloc>;
1609 
1610 template <typename It, typename Alloc,
1611           typename = std::enable_if_t<is_input_iterator_v<It>>,
1612           typename = std::enable_if_t<is_allocator_v<Alloc>>>
1613 concurrent_hash_map( It, It, Alloc )
1614 -> concurrent_hash_map<iterator_key_t<It>, iterator_mapped_t<It>, d1::tbb_hash_compare<iterator_key_t<It>>, Alloc>;
1615 
1616 template <typename Key, typename T,
1617           typename HashCompare = d1::tbb_hash_compare<std::remove_const_t<Key>>,
1618           typename Alloc = tbb_allocator<std::pair<const Key, T>>,
1619           typename = std::enable_if_t<is_allocator_v<Alloc>>,
1620           typename = std::enable_if_t<!is_allocator_v<HashCompare>>>
1621 concurrent_hash_map( std::initializer_list<std::pair<Key, T>>, HashCompare = HashCompare(), Alloc = Alloc() )
1622 -> concurrent_hash_map<std::remove_const_t<Key>, T, HashCompare, Alloc>;
1623 
1624 template <typename Key, typename T, typename Alloc,
1625           typename = std::enable_if_t<is_allocator_v<Alloc>>>
1626 concurrent_hash_map( std::initializer_list<std::pair<Key, T>>, Alloc )
1627 -> concurrent_hash_map<std::remove_const_t<Key>, T, d1::tbb_hash_compare<std::remove_const_t<Key>>, Alloc>;
1628 
1629 #endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
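
// Illustrative usage sketch (editorial): with the deduction guides above, the template
// arguments can be deduced from an iterator range or an initializer list.
//
//     std::vector<std::pair<int, std::string>> src{{1, "one"}, {2, "two"}};
//     tbb::concurrent_hash_map table(src.begin(), src.end());   // concurrent_hash_map<int, std::string>
//     tbb::concurrent_hash_map table2({std::make_pair(3, 4)});  // concurrent_hash_map<int, int>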
1630 
1631 template <typename Key, typename T, typename HashCompare, typename A1, typename A2>
1632 inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
1633     if(a.size() != b.size()) return false;
1634     typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
1635     typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
1636     for(; i != i_end; ++i) {
1637         j = b.equal_range(i->first).first;
1638         if( j == j_end || !(i->second == j->second) ) return false;
1639     }
1640     return true;
1641 }
1642 
1643 #if !__TBB_CPP20_COMPARISONS_PRESENT
1644 template <typename Key, typename T, typename HashCompare, typename A1, typename A2>
1645 inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
1646 {    return !(a == b); }
1647 #endif // !__TBB_CPP20_COMPARISONS_PRESENT
1648 
1649 template <typename Key, typename T, typename HashCompare, typename A>
1650 inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
1651 {    a.swap( b ); }
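
// Illustrative usage sketch (editorial): equality compares contents element-wise
// (the allocators may differ), and the non-member swap() exchanges whole tables.
//
//     tbb::concurrent_hash_map<int, int> a, b;
//     a.insert(std::make_pair(1, 10));
//     swap(a, b);                 // now b holds {1, 10} and a is empty
//     bool equal = (a == b);      // false: the contents differ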
1652 
1653 } // namespace d2
1654 } // namespace detail
1655 
1656 inline namespace v1 {
1657     using detail::split;
1658     using detail::d2::concurrent_hash_map;
1659     using detail::d1::tbb_hash_compare;
1660 } // namespace v1
1661 
1662 } // namespace tbb
1663 
1664 #endif /* __TBB_concurrent_hash_map_H */
1665