/*
    Copyright (c) 2005-2022 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include "detail/_namespace_injection.h"
#include "detail/_utils.h"
#include "detail/_assert.h"
#include "detail/_allocator_traits.h"
#include "detail/_containers_helpers.h"
#include "detail/_template_helpers.h"
#include "detail/_hash_compare.h"
#include "detail/_range_common.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"

#include <atomic>
#include <initializer_list>
#include <tuple>
#include <iterator>
#include <utility> // Need std::pair
#include <cstring> // Need std::memset

namespace tbb {
namespace detail {
namespace d2 {

#if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS && __TBB_CPP20_CONCEPTS_PRESENT
template <typename Mutex>
concept ch_map_rw_scoped_lockable = rw_scoped_lockable<Mutex> &&
                                    requires( const typename Mutex::scoped_lock& sl ) {
                                        { sl.is_writer() } -> std::convertible_to<bool>;
                                    };
#endif
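
// Illustrative sketch only: a user-supplied mutex satisfies ch_map_rw_scoped_lockable when it
// models rw_scoped_lockable (reader-writer locking through a nested scoped_lock type) and, in
// addition, its scoped_lock can report write ownership. The name user_rw_mutex is hypothetical:
//
//     class user_rw_mutex {
//     public:
//         class scoped_lock {
//         public:
//             scoped_lock();
//             scoped_lock( user_rw_mutex& m, bool write = true );
//             ~scoped_lock();
//             void acquire( user_rw_mutex& m, bool write = true );
//             bool try_acquire( user_rw_mutex& m, bool write = true );
//             void release();
//             bool upgrade_to_writer();
//             bool downgrade_to_reader();
//             bool is_writer() const; // the extra requirement checked by the concept above
//         };
//     };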

template <typename MutexType>
struct hash_map_node_base : no_copy {
    using mutex_type = MutexType;
    // Scoped lock type for mutex
    using scoped_type = typename MutexType::scoped_lock;
    // Next node in chain
    hash_map_node_base* next;
    mutex_type mutex;
};

// Incompleteness flag value
static void* const rehash_req_flag = reinterpret_cast<void*>(std::size_t(3));
// Rehashed empty bucket flag
static void* const empty_rehashed_flag = reinterpret_cast<void*>(std::size_t(0));

template <typename MutexType>
bool rehash_required( hash_map_node_base<MutexType>* node_ptr ) {
    return reinterpret_cast<void*>(node_ptr) == rehash_req_flag;
}

#if TBB_USE_ASSERT
template <typename MutexType>
bool empty_rehashed( hash_map_node_base<MutexType>* node_ptr ) {
    return reinterpret_cast<void*>(node_ptr) == empty_rehashed_flag;
}
#endif

// Base class of concurrent_hash_map

template <typename Allocator, typename MutexType>
class hash_map_base {
public:
    using size_type = std::size_t;
    using hashcode_type = std::size_t;
    using segment_index_type = std::size_t;
    using node_base = hash_map_node_base<MutexType>;

    struct bucket : no_copy {
        using mutex_type = MutexType;
        using scoped_type = typename mutex_type::scoped_lock;

        bucket() : node_list(nullptr) {}
        bucket( node_base* ptr ) : node_list(ptr) {}

        mutex_type mutex;
        std::atomic<node_base*> node_list;
    };

    using allocator_type = Allocator;
    using allocator_traits_type = tbb::detail::allocator_traits<allocator_type>;
    using bucket_allocator_type = typename allocator_traits_type::template rebind_alloc<bucket>;
    using bucket_allocator_traits = tbb::detail::allocator_traits<bucket_allocator_type>;

    // Count of segments in the embedded (zero) block
    static constexpr size_type embedded_block = 1;
    // Count of buckets in the embedded block
    static constexpr size_type embedded_buckets = 1 << embedded_block;
    // Count of segments in the first block, including embedded_block;
    // chosen so that with a 16-byte bucket the allocations are powers of 4096
    static constexpr size_type first_block = 8;
    // Size of the segment pointer table: one segment per bit of the hash code
    static constexpr size_type pointers_per_table = sizeof(segment_index_type) * 8;

    using segment_ptr_type = bucket*;
    using atomic_segment_type = std::atomic<segment_ptr_type>;
    using segments_table_type = atomic_segment_type[pointers_per_table];

    hash_map_base( const allocator_type& alloc ) : my_allocator(alloc), my_mask(embedded_buckets - 1), my_size(0) {
        for (size_type i = 0; i != embedded_buckets; ++i) {
            my_embedded_segment[i].node_list.store(nullptr, std::memory_order_relaxed);
        }

        for (size_type segment_index = 0; segment_index < pointers_per_table; ++segment_index) {
            auto argument = segment_index < embedded_block ? my_embedded_segment + segment_base(segment_index) : nullptr;
            my_table[segment_index].store(argument, std::memory_order_relaxed);
        }

        __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
    }

    // Segment index of the given array index
    static segment_index_type segment_index_of( size_type index ) {
        return segment_index_type(tbb::detail::log2( index|1 ));
    }

    // The first array index of the given segment
    static segment_index_type segment_base( segment_index_type k ) {
        return (segment_index_type(1) << k & ~segment_index_type(1));
    }

    // Segment size except for k == 0
    static size_type segment_size( segment_index_type k ) {
        return size_type(1) << k; // fake value for k == 0
    }
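
    // Worked example of the segment arithmetic above (illustration only):
    //   bucket index:      0 1 | 2 3 | 4 5 6 7 | 8 ... 15 | ...
    //   segment_index_of:  0 0 | 1 1 | 2 2 2 2 | 3 ... 3  | ...
    //   segment_base(k):   0, 2, 4, 8, 16, ...   (1 << k with the lowest bit cleared)
    //   segment_size(k):   1, 2, 4, 8, 16, ...   (the k == 0 value is fake; segment 0
    //                                             actually holds the two embedded buckets)
    // so bucket i lives at my_table[segment_index_of(i)][i - segment_base(segment_index_of(i))].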

    // True if ptr is a valid node pointer; the flag values occupy the lowest addresses
    static bool is_valid( void* ptr ) {
        return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
    }

    template <typename... Args>
    void init_buckets_impl( segment_ptr_type ptr, size_type sz, const Args&... args ) {
        for (size_type i = 0; i < sz; ++i) {
            bucket_allocator_traits::construct(my_allocator, ptr + i, args...);
        }
    }

    // Initialize buckets
    void init_buckets( segment_ptr_type ptr, size_type sz, bool is_initial ) {
        if (is_initial) {
            init_buckets_impl(ptr, sz);
        } else {
            init_buckets_impl(ptr, sz, reinterpret_cast<node_base*>(rehash_req_flag));
        }
    }

    // Add node n to bucket b
    static void add_to_bucket( bucket* b, node_base* n ) {
        __TBB_ASSERT(!rehash_required(b->node_list.load(std::memory_order_relaxed)), nullptr);
        n->next = b->node_list.load(std::memory_order_relaxed);
        b->node_list.store(n, std::memory_order_relaxed); // it's under lock and the flag is set
    }

    const bucket_allocator_type& get_allocator() const {
        return my_allocator;
    }

    bucket_allocator_type& get_allocator() {
        return my_allocator;
    }

    // Enable segment
    void enable_segment( segment_index_type k, bool is_initial = false ) {
        __TBB_ASSERT( k, "Zero segment must be embedded" );
        size_type sz;
        __TBB_ASSERT( !is_valid(my_table[k].load(std::memory_order_relaxed)), "Wrong concurrent assignment");
        if (k >= first_block) {
            sz = segment_size(k);
            segment_ptr_type ptr = nullptr;
            try_call( [&] {
                ptr = bucket_allocator_traits::allocate(my_allocator, sz);
            } ).on_exception( [&] {
                my_table[k].store(nullptr, std::memory_order_relaxed);
            });

            __TBB_ASSERT(ptr, nullptr);
            init_buckets(ptr, sz, is_initial);
            my_table[k].store(ptr, std::memory_order_release);
            sz <<= 1; // double it to get the entire capacity of the container
        } else { // the first block
            __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
            sz = segment_size(first_block);
            segment_ptr_type ptr = nullptr;
            try_call( [&] {
                ptr = bucket_allocator_traits::allocate(my_allocator, sz - embedded_buckets);
            } ).on_exception( [&] {
                my_table[k].store(nullptr, std::memory_order_relaxed);
            });

            __TBB_ASSERT(ptr, nullptr);
            init_buckets(ptr, sz - embedded_buckets, is_initial);
            ptr -= segment_base(embedded_block);
            for (segment_index_type i = embedded_block; i < first_block; i++) // calculate the offsets
                my_table[i].store(ptr + segment_base(i), std::memory_order_release);
        }
        my_mask.store(sz-1, std::memory_order_release);
    }

    void delete_segment( segment_index_type s ) {
        segment_ptr_type buckets_ptr = my_table[s].load(std::memory_order_relaxed);
        size_type sz = segment_size( s ? s : 1 );

        size_type deallocate_size = 0;

        if (s >= first_block) { // the first segment or the next
            deallocate_size = sz;
        } else if (s == embedded_block && embedded_block != first_block) {
            deallocate_size = segment_size(first_block) - embedded_buckets;
        }

        for (size_type i = 0; i < deallocate_size; ++i) {
            bucket_allocator_traits::destroy(my_allocator, buckets_ptr + i);
        }
        if (deallocate_size != 0) {
            bucket_allocator_traits::deallocate(my_allocator, buckets_ptr, deallocate_size);
        }

        if (s >= embedded_block) my_table[s].store(nullptr, std::memory_order_relaxed);
    }

    // Get bucket by (masked) hashcode
    bucket* get_bucket( hashcode_type h ) const noexcept {
        segment_index_type s = segment_index_of( h );
        h -= segment_base(s);
        segment_ptr_type seg = my_table[s].load(std::memory_order_acquire);
        __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
        return &seg[h];
    }

    // Internal serial rehashing helper
    void mark_rehashed_levels( hashcode_type h ) noexcept {
        segment_index_type s = segment_index_of( h );
        while (segment_ptr_type seg = my_table[++s].load(std::memory_order_relaxed))
            if (rehash_required(seg[h].node_list.load(std::memory_order_relaxed))) {
                seg[h].node_list.store(reinterpret_cast<node_base*>(empty_rehashed_flag), std::memory_order_relaxed);
                mark_rehashed_levels( h + ((hashcode_type)1<<s) ); // optimized segment_base(s)
            }
    }

    // Check for a mask race
    // Splitting into two functions should help inlining
    inline bool check_mask_race( const hashcode_type h, hashcode_type& m ) const {
        hashcode_type m_now, m_old = m;
        m_now = my_mask.load(std::memory_order_acquire);
        if (m_old != m_now) {
            return check_rehashing_collision(h, m_old, m = m_now);
        }
        return false;
    }

    // Process a mask race, check for a rehashing collision
    bool check_rehashing_collision( const hashcode_type h, hashcode_type m_old, hashcode_type m ) const {
        __TBB_ASSERT(m_old != m, nullptr); // TODO?: m arg could be optimized out by passing h = h&m
        if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
            // condition above proves that 'h' has some other bits set beside 'm_old'
            // find next applicable mask after m_old //TODO: look at bsl instruction
            for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum a few rounds depending on the first block size
                ;
            m_old = (m_old<<1) - 1; // get full mask from a bit
            __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, nullptr);
            // check whether it is rehashing/ed
            if (!rehash_required(get_bucket(h & m_old)->node_list.load(std::memory_order_acquire))) {
                return true;
            }
        }
        return false;
    }
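
    // Worked example for check_rehashing_collision (illustration only): let h = 0b1011,
    // m_old = 0b0011 and the new mask m = 0b1111. Then (h & m_old) == 0b0011 but
    // (h & m) == 0b1011, so h now maps to a different bucket. The loop advances m_old
    // through 0b0100 to 0b1000 (the next bit set in h), and (m_old << 1) - 1 == 0b1111
    // restores the smallest full mask that separates the old and the new home bucket.
    // If the new home bucket is already rehashed, the caller must restart the lookup.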

    // Insert a node and check the load factor. @return segment index to enable.
    segment_index_type insert_new_node( bucket* b, node_base* n, hashcode_type mask ) {
        size_type sz = ++my_size; // prefix form is to enforce allocation after the first item is inserted
        add_to_bucket( b, n );
        // check load factor
        if( sz >= mask ) { // TODO: add custom load_factor
            segment_index_type new_seg = tbb::detail::log2( mask+1 ); // optimized segment_index_of
            __TBB_ASSERT( is_valid(my_table[new_seg-1].load(std::memory_order_relaxed)), "new allocations must not publish new mask until segment has allocated");
            static const segment_ptr_type is_allocating = segment_ptr_type(2);
            segment_ptr_type disabled = nullptr;
            if (!(my_table[new_seg].load(std::memory_order_acquire))
                && my_table[new_seg].compare_exchange_strong(disabled, is_allocating))
                return new_seg; // The value must be processed
        }
        return 0;
    }
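
    // Growth example (illustration only, assuming the default constants above): a new table
    // starts with mask == 1. The first insertion makes my_size == 1 >= mask, so segment 1 is
    // claimed and enable_segment() allocates the whole first block (buckets 2..255, mask 255).
    // When my_size reaches 255, segment 8 (256 buckets) is claimed and the mask becomes 511;
    // each later growth step doubles the capacity again.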

    // Prepare enough segments for the given number of buckets
    void reserve(size_type buckets) {
        if( !buckets-- ) return;
        bool is_initial = !my_size.load(std::memory_order_relaxed);
        for (size_type m = my_mask.load(std::memory_order_relaxed); buckets > m;
             m = my_mask.load(std::memory_order_relaxed))
        {
            enable_segment( segment_index_of( m+1 ), is_initial );
        }
    }
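
    // For instance (illustration only), reserve(1000) starts from mask == 1 and enables
    // segments until the mask covers index 999: first the whole first block (mask 255),
    // then segment 8 (mask 511), then segment 9 (mask 1023), at which point the loop stops.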

    // Swap hash_map_bases
    void internal_swap_content(hash_map_base& table) {
        using std::swap;
        swap_atomics_relaxed(my_mask, table.my_mask);
        swap_atomics_relaxed(my_size, table.my_size);

        for (size_type i = 0; i < embedded_buckets; i++) {
            auto temp = my_embedded_segment[i].node_list.load(std::memory_order_relaxed);
            my_embedded_segment[i].node_list.store(table.my_embedded_segment[i].node_list.load(std::memory_order_relaxed),
                                                   std::memory_order_relaxed);
            table.my_embedded_segment[i].node_list.store(temp, std::memory_order_relaxed);
        }
        for (size_type i = embedded_block; i < pointers_per_table; i++) {
            auto temp = my_table[i].load(std::memory_order_relaxed);
            my_table[i].store(table.my_table[i].load(std::memory_order_relaxed),
                              std::memory_order_relaxed);
            table.my_table[i].store(temp, std::memory_order_relaxed);
        }
    }

    void internal_move(hash_map_base&& other) {
        my_mask.store(other.my_mask.load(std::memory_order_relaxed), std::memory_order_relaxed);
        other.my_mask.store(embedded_buckets - 1, std::memory_order_relaxed);

        my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
        other.my_size.store(0, std::memory_order_relaxed);

        for (size_type i = 0; i < embedded_buckets; ++i) {
            my_embedded_segment[i].node_list.store(other.my_embedded_segment[i].node_list, std::memory_order_relaxed);
            other.my_embedded_segment[i].node_list.store(nullptr, std::memory_order_relaxed);
        }

        for (size_type i = embedded_block; i < pointers_per_table; ++i) {
            my_table[i].store(other.my_table[i].load(std::memory_order_relaxed),
                              std::memory_order_relaxed);
            other.my_table[i].store(nullptr, std::memory_order_relaxed);
        }
    }

protected:
    bucket_allocator_type my_allocator;
    // Hash mask = sum of allocated segment sizes - 1
    std::atomic<hashcode_type> my_mask;
    // Size of the container in stored items
    std::atomic<size_type> my_size; // It must be in a separate cache line from my_mask due to performance effects
    // Zero segment
    bucket my_embedded_segment[embedded_buckets];
    // Segment pointers table. Also prevents false sharing between my_mask and my_size
    segments_table_type my_table;
};

template <typename Iterator>
class hash_map_range;

// Meets the requirements of a forward iterator for STL
// Value is either the T or const T type of the container.
template <typename Container, typename Value>
class hash_map_iterator {
    using map_type = Container;
    using node = typename Container::node;
    using map_base = typename Container::base_type;
    using node_base = typename map_base::node_base;
    using bucket = typename map_base::bucket;
public:
    using value_type = Value;
    using size_type = typename Container::size_type;
    using difference_type = typename Container::difference_type;
    using pointer = value_type*;
    using reference = value_type&;
    using iterator_category = std::forward_iterator_tag;

    // Construct an undefined iterator
    hash_map_iterator() : my_map(), my_index(), my_bucket(), my_node() {}
    hash_map_iterator( const hash_map_iterator<Container, typename Container::value_type>& other ) :
        my_map(other.my_map),
        my_index(other.my_index),
        my_bucket(other.my_bucket),
        my_node(other.my_node)
    {}

    hash_map_iterator& operator=( const hash_map_iterator<Container, typename Container::value_type>& other ) {
        my_map = other.my_map;
        my_index = other.my_index;
        my_bucket = other.my_bucket;
        my_node = other.my_node;
        return *this;
    }

    Value& operator*() const {
        __TBB_ASSERT( map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
        return my_node->value();
    }

    Value* operator->() const { return &operator*(); }

    hash_map_iterator& operator++() {
        my_node = static_cast<node*>( my_node->next );
        if( !my_node ) advance_to_next_bucket();
        return *this;
    }

    // Post increment
    hash_map_iterator operator++(int) {
        hash_map_iterator old(*this);
        operator++();
        return old;
    }
private:
    template <typename C, typename T, typename U>
    friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template <typename C, typename T, typename U>
    friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template <typename C, typename T, typename U>
    friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template <typename C, typename U>
    friend class hash_map_iterator;

    template <typename I>
    friend class hash_map_range;

    void advance_to_next_bucket() { // TODO?: refactor to an iterator_base class
        size_t k = my_index+1;
        __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
        while (k <= my_map->my_mask.load(std::memory_order_relaxed)) {
            // The following test uses 2's-complement wizardry
            if( k&(k-2) ) // not the beginning of a segment
                ++my_bucket;
            else my_bucket = my_map->get_bucket( k );
            node_base* n = my_bucket->node_list.load(std::memory_order_relaxed);
            if( map_base::is_valid(n) ) {
                my_node = static_cast<node*>(n);
                my_index = k;
                return;
            }
            ++k;
        }
        my_bucket = nullptr; my_node = nullptr; my_index = k; // the end
    }
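
    // Note on the test above (illustration only): k & (k-2) == 0 exactly when k is a power
    // of two (2, 4, 8, ...), i.e. the first bucket index of a non-embedded segment, where
    // the iterator must re-fetch the bucket pointer through get_bucket(k). Everywhere else
    // (including k == 1, which lies in the embedded segment) buckets are contiguous in
    // memory and ++my_bucket suffices.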

    template <typename Key, typename T, typename HashCompare, typename A
#if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
             , typename M
             >
    __TBB_requires(tbb::detail::hash_compare<HashCompare, Key> &&
                   ch_map_rw_scoped_lockable<M>)
#else
             >
    __TBB_requires(tbb::detail::hash_compare<HashCompare, Key>)
#endif
    friend class concurrent_hash_map;

    hash_map_iterator( const Container& map, std::size_t index, const bucket* b, node_base* n ) :
        my_map(&map), my_index(index), my_bucket(b), my_node(static_cast<node*>(n))
    {
        if( b && !map_base::is_valid(n) )
            advance_to_next_bucket();
    }

    // concurrent_hash_map over which we are iterating.
    const Container* my_map;
    // Index in the hash table for the current item
    size_t my_index;
    // Pointer to the bucket
    const bucket* my_bucket;
    // Pointer to the node that holds the current item
    node* my_node;
};

template <typename Container, typename T, typename U>
bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node == j.my_node && i.my_map == j.my_map;
}

template <typename Container, typename T, typename U>
bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node != j.my_node || i.my_map != j.my_map;
}

// Range class used with concurrent_hash_map
template <typename Iterator>
class hash_map_range {
    using map_type = typename Iterator::map_type;
public:
    // Type for the size of a range
    using size_type = std::size_t;
    using value_type = typename Iterator::value_type;
    using reference = typename Iterator::reference;
    using difference_type = typename Iterator::difference_type;
    using iterator = Iterator;

    // True if the range is empty.
    bool empty() const { return my_begin == my_end; }

    // True if the range can be partitioned into two subranges.
    bool is_divisible() const {
        return my_midpoint != my_end;
    }

    // Split the range.
    hash_map_range( hash_map_range& r, split ) :
        my_end(r.my_end),
        my_grainsize(r.my_grainsize)
    {
        r.my_end = my_begin = r.my_midpoint;
        __TBB_ASSERT( !empty(), "Attempt to split an indivisible range" );
        __TBB_ASSERT( !r.empty(), "Attempt to split an indivisible range" );
        set_midpoint();
        r.set_midpoint();
    }

    // Initialize the range with a container and the specified grainsize
    hash_map_range( const map_type& map, size_type grainsize_ = 1 ) :
        my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list.load(std::memory_order_relaxed) ) ),
        my_end( Iterator( map, map.my_mask.load(std::memory_order_relaxed) + 1, nullptr, nullptr ) ),
        my_grainsize( grainsize_ )
    {
        __TBB_ASSERT( grainsize_ > 0, "grainsize must be positive" );
        set_midpoint();
    }

    Iterator begin() const { return my_begin; }
    Iterator end() const { return my_end; }
    // The grain size for this range.
    size_type grainsize() const { return my_grainsize; }

private:
    Iterator my_begin;
    Iterator my_end;
    mutable Iterator my_midpoint;
    size_t my_grainsize;
    // Set my_midpoint to point approximately half way between my_begin and my_end.
    void set_midpoint() const;
    template <typename U> friend class hash_map_range;
};

template <typename Iterator>
void hash_map_range<Iterator>::set_midpoint() const {
    // Split by groups of nodes
    size_t m = my_end.my_index - my_begin.my_index;
    if( m > my_grainsize ) {
        m = my_begin.my_index + m/2u;
        auto b = my_begin.my_map->get_bucket(m);
        my_midpoint = Iterator(*my_begin.my_map, m, b, b->node_list.load(std::memory_order_relaxed));
    } else {
        my_midpoint = my_end;
    }
    __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
                  "my_begin is after my_midpoint" );
    __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
                  "my_midpoint is after my_end" );
    __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
                  "[my_begin, my_midpoint) range should not be empty" );
}

template <typename Key, typename T,
          typename HashCompare = d1::tbb_hash_compare<Key>,
          typename Allocator = tbb_allocator<std::pair<const Key, T>>
#if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
        , typename MutexType = spin_rw_mutex
         >
__TBB_requires(tbb::detail::hash_compare<HashCompare, Key> &&
               ch_map_rw_scoped_lockable<MutexType>)
#else
         >
__TBB_requires(tbb::detail::hash_compare<HashCompare, Key>)
#endif
class concurrent_hash_map
#if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
    : protected hash_map_base<Allocator, MutexType>
#else
    : protected hash_map_base<Allocator, spin_rw_mutex>
#endif
{
    template <typename Container, typename Value>
    friend class hash_map_iterator;

    template <typename I>
    friend class hash_map_range;
    using allocator_traits_type = tbb::detail::allocator_traits<Allocator>;

#if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
    using base_type = hash_map_base<Allocator, MutexType>;
#else
    using base_type = hash_map_base<Allocator, spin_rw_mutex>;
#endif
public:
    using key_type = Key;
    using mapped_type = T;
    // type_identity is needed to disable implicit deduction guides for std::initializer_list constructors
    // and copy/move constructor with explicit allocator argument
    using allocator_type = tbb::detail::type_identity_t<Allocator>;
    using hash_compare_type = tbb::detail::type_identity_t<HashCompare>;
    using value_type = std::pair<const Key, T>;
    using size_type = typename base_type::size_type;
    using difference_type = std::ptrdiff_t;
#if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
    using mutex_type = MutexType;
#endif
    using pointer = typename allocator_traits_type::pointer;
    using const_pointer = typename allocator_traits_type::const_pointer;

    using reference = value_type&;
    using const_reference = const value_type&;
    using iterator = hash_map_iterator<concurrent_hash_map, value_type>;
    using const_iterator = hash_map_iterator<concurrent_hash_map, const value_type>;
    using range_type = hash_map_range<iterator>;
    using const_range_type = hash_map_range<const_iterator>;

protected:
    static_assert(std::is_same<value_type, typename Allocator::value_type>::value,
                  "value_type of the container must be the same as its allocator's");

    friend class const_accessor;
    class node;
    using segment_index_type = typename base_type::segment_index_type;
    using segment_ptr_type = typename base_type::segment_ptr_type;
    using node_base = typename base_type::node_base;
    using bucket = typename base_type::bucket;
    using hashcode_type = typename base_type::hashcode_type;
    using bucket_allocator_type = typename base_type::bucket_allocator_type;
    using node_allocator_type = typename base_type::allocator_traits_type::template rebind_alloc<node>;
    using node_allocator_traits = tbb::detail::allocator_traits<node_allocator_type>;
    hash_compare_type my_hash_compare;

    class node : public node_base {
    public:
        node() {}
        ~node() {}
        pointer storage() { return &my_value; }
        value_type& value() { return *storage(); }
    private:
        union {
            value_type my_value;
        };
    };

    void delete_node( node_base* n ) {
        node_allocator_type node_allocator(this->get_allocator());
        node_allocator_traits::destroy(node_allocator, static_cast<node*>(n)->storage());
        node_allocator_traits::destroy(node_allocator, static_cast<node*>(n));
        node_allocator_traits::deallocate(node_allocator, static_cast<node*>(n), 1);
    }

    template <typename... Args>
    static node* create_node(bucket_allocator_type& allocator, Args&&... args) {
        node_allocator_type node_allocator(allocator);
        node* node_ptr = node_allocator_traits::allocate(node_allocator, 1);
        auto guard = make_raii_guard([&] {
            node_allocator_traits::destroy(node_allocator, node_ptr);
            node_allocator_traits::deallocate(node_allocator, node_ptr, 1);
        });

        node_allocator_traits::construct(node_allocator, node_ptr);
        node_allocator_traits::construct(node_allocator, node_ptr->storage(), std::forward<Args>(args)...);
        guard.dismiss();
        return node_ptr;
    }

    static node* allocate_node_copy_construct(bucket_allocator_type& allocator, const Key& key, const T* t) {
        return create_node(allocator, key, *t);
    }

    static node* allocate_node_move_construct(bucket_allocator_type& allocator, const Key& key, const T* t) {
        return create_node(allocator, key, std::move(*const_cast<T*>(t)));
    }

    template <typename K = Key>
    static node* allocate_node_default_construct(bucket_allocator_type& allocator, const K& key, const T*) {
        // Emplace-construct an empty T object inside the pair
        return create_node(allocator, std::piecewise_construct,
                           std::forward_as_tuple(key), std::forward_as_tuple());
    }

    static node* do_not_allocate_node(bucket_allocator_type&, const Key&, const T*) {
        __TBB_ASSERT(false, "this dummy function should not be called");
        return nullptr;
    }

    template <typename K>
    node* search_bucket( const K& key, bucket* b ) const {
        node* n = static_cast<node*>( b->node_list.load(std::memory_order_relaxed) );
        while (this->is_valid(n) && !my_hash_compare.equal(key, n->value().first))
            n = static_cast<node*>( n->next );
        __TBB_ASSERT(!rehash_required(n), "Search can be executed only for a rehashed bucket");
        return n;
    }

    // A bucket accessor finds, optionally rehashes, locks, and provides access to a bucket
    class bucket_accessor : public bucket::scoped_type {
        bucket* my_b;
    public:
        bucket_accessor( concurrent_hash_map* base, const hashcode_type h, bool writer = false ) { acquire( base, h, writer ); }
        // find a bucket by masked hashcode, optionally rehash, and acquire the lock
        inline void acquire( concurrent_hash_map* base, const hashcode_type h, bool writer = false ) {
            my_b = base->get_bucket( h );
            // TODO: actually, notification is unnecessary here, just hiding double-check
            if (rehash_required(my_b->node_list.load(std::memory_order_acquire))
                && bucket::scoped_type::try_acquire( my_b->mutex, /*write=*/true ) )
            {
                if (rehash_required(my_b->node_list.load(std::memory_order_relaxed))) base->rehash_bucket(my_b, h); // recursive rehashing
            }
            else bucket::scoped_type::acquire( my_b->mutex, writer );
            __TBB_ASSERT(!rehash_required(my_b->node_list.load(std::memory_order_relaxed)), nullptr);
        }

        // get the bucket pointer
        bucket* operator()() { return my_b; }
    };

    // TODO refactor to hash_base
    void rehash_bucket( bucket* b_new, const hashcode_type hash ) {
        __TBB_ASSERT( hash > 1, "The lowermost buckets can't be rehashed" );
        b_new->node_list.store(reinterpret_cast<node_base*>(empty_rehashed_flag), std::memory_order_release); // mark rehashed
        hashcode_type mask = (hashcode_type(1) << tbb::detail::log2(hash)) - 1; // get the parent mask from the topmost bit
        bucket_accessor b_old( this, hash & mask );

        mask = (mask<<1) | 1; // get the full mask for the new bucket
        __TBB_ASSERT( (mask&(mask+1))==0 && (hash & mask) == hash, nullptr );
    restart:
        node_base* prev = nullptr;
        node_base* curr = b_old()->node_list.load(std::memory_order_acquire);
        while (this->is_valid(curr)) {
            hashcode_type curr_node_hash = my_hash_compare.hash(static_cast<node*>(curr)->value().first);

            if ((curr_node_hash & mask) == hash) {
                if (!b_old.is_writer()) {
                    if (!b_old.upgrade_to_writer()) {
                        goto restart; // node ptr can be invalid due to concurrent erase
                    }
                }
                node_base* next = curr->next;
                // exclude from b_old
                if (prev == nullptr) {
                    b_old()->node_list.store(curr->next, std::memory_order_relaxed);
                } else {
                    prev->next = curr->next;
                }
                this->add_to_bucket(b_new, curr);
                curr = next;
            } else {
                prev = curr;
                curr = curr->next;
            }
        }
    }
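
    // Parent-bucket example for rehash_bucket above (illustration only): for hash == 0b1101
    // (13), log2(13) == 3, so the parent mask is (1 << 3) - 1 == 0b0111 and the parent
    // bucket is 13 & 7 == 5; nodes whose full hash matches 13 under the widened mask
    // 0b1111 are then moved from bucket 5 into the new bucket 13.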

    template <typename U>
    using hash_compare_is_transparent = dependent_bool<comp_is_transparent<hash_compare_type>, U>;

public:

    class accessor;
    // Combines data access, locking, and garbage collection.
    class const_accessor : private node::scoped_type /*which is derived from no_copy*/ {
#if __TBB_PREVIEW_CONCURRENT_HASH_MAP_EXTENSIONS
        friend class concurrent_hash_map<Key,T,HashCompare,Allocator,MutexType>;
#else
        friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
#endif
        friend class accessor;
    public:
        // Type of value
        using value_type = const typename concurrent_hash_map::value_type;

        // True if the result is empty.
        bool empty() const { return !my_node; }

        // Set to null
        void release() {
            if( my_node ) {
                node::scoped_type::release();
                my_node = nullptr;
            }
        }

        // Return a reference to the associated value in the hash table.
        const_reference operator*() const {
            __TBB_ASSERT( my_node, "attempt to dereference an empty accessor" );
            return my_node->value();
        }

        // Return a pointer to the associated value in the hash table.
        const_pointer operator->() const {
            return &operator*();
        }

        // Create an empty result
        const_accessor() : my_node(nullptr), my_hash() {}

        // Destroy the result after releasing the underlying reference.
        ~const_accessor() {
            my_node = nullptr; // the scoped lock's release() is called in its destructor
        }
    protected:
        bool is_writer() { return node::scoped_type::is_writer(); }
        node* my_node;
        hashcode_type my_hash;
    };

    // Allows write access to elements and combines data access, locking, and garbage collection.
    class accessor : public const_accessor {
    public:
        // Type of value
        using value_type = typename concurrent_hash_map::value_type;

        // Return a reference to the associated value in the hash table.
        reference operator*() const {
            __TBB_ASSERT( this->my_node, "attempt to dereference an empty accessor" );
            return this->my_node->value();
        }

        // Return a pointer to the associated value in the hash table.
        pointer operator->() const {
            return &operator*();
        }
    };
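
    // Typical accessor usage (illustrative sketch only):
    //
    //     tbb::concurrent_hash_map<int, std::string> table;
    //     {
    //         decltype(table)::accessor acc;        // write lock on the element
    //         table.insert(acc, 1);                 // insert key 1 or find the existing one
    //         acc->second = "one";
    //     }                                         // lock released when acc is destroyed
    //     {
    //         decltype(table)::const_accessor cacc; // read lock
    //         if (table.find(cacc, 1))
    //             do_something(cacc->second);       // do_something is a hypothetical callback
    //     }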
83449e08aacStbbdev
835d86ed7fbStbbdev explicit concurrent_hash_map( const hash_compare_type& compare, const allocator_type& a = allocator_type() )
83649e08aacStbbdev : base_type(a)
83749e08aacStbbdev , my_hash_compare(compare)
83849e08aacStbbdev {}
83949e08aacStbbdev
840d86ed7fbStbbdev concurrent_hash_map() : concurrent_hash_map(hash_compare_type()) {}
84149e08aacStbbdev
84249e08aacStbbdev explicit concurrent_hash_map( const allocator_type& a )
843d86ed7fbStbbdev : concurrent_hash_map(hash_compare_type(), a)
84449e08aacStbbdev {}
84549e08aacStbbdev
84649e08aacStbbdev // Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
84749e08aacStbbdev concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() )
84849e08aacStbbdev : concurrent_hash_map(a)
84949e08aacStbbdev {
85049e08aacStbbdev this->reserve(n);
85149e08aacStbbdev }
85249e08aacStbbdev
853d86ed7fbStbbdev concurrent_hash_map( size_type n, const hash_compare_type& compare, const allocator_type& a = allocator_type() )
85449e08aacStbbdev : concurrent_hash_map(compare, a)
85549e08aacStbbdev {
85649e08aacStbbdev this->reserve(n);
85749e08aacStbbdev }
85849e08aacStbbdev
85949e08aacStbbdev // Copy constructor
86049e08aacStbbdev concurrent_hash_map( const concurrent_hash_map &table )
86149e08aacStbbdev : concurrent_hash_map(node_allocator_traits::select_on_container_copy_construction(table.get_allocator()))
86249e08aacStbbdev {
86349e08aacStbbdev try_call( [&] {
86449e08aacStbbdev internal_copy(table);
86549e08aacStbbdev }).on_exception( [&] {
86649e08aacStbbdev this->clear();
86749e08aacStbbdev });
86849e08aacStbbdev }
86949e08aacStbbdev
87049e08aacStbbdev concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a)
87149e08aacStbbdev : concurrent_hash_map(a)
87249e08aacStbbdev {
87349e08aacStbbdev try_call( [&] {
87449e08aacStbbdev internal_copy(table);
87549e08aacStbbdev }).on_exception( [&] {
87649e08aacStbbdev this->clear();
87749e08aacStbbdev });
87849e08aacStbbdev }
87949e08aacStbbdev
88049e08aacStbbdev // Move constructor
88149e08aacStbbdev concurrent_hash_map( concurrent_hash_map &&table )
88249e08aacStbbdev : concurrent_hash_map(std::move(table.get_allocator()))
88349e08aacStbbdev {
88449e08aacStbbdev this->internal_move(std::move(table));
88549e08aacStbbdev }
88649e08aacStbbdev
88749e08aacStbbdev // Move constructor
88849e08aacStbbdev concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a )
88949e08aacStbbdev : concurrent_hash_map(a)
89049e08aacStbbdev {
89149e08aacStbbdev using is_equal_type = typename node_allocator_traits::is_always_equal;
89249e08aacStbbdev internal_move_construct_with_allocator(std::move(table), a, is_equal_type());
89349e08aacStbbdev }
89449e08aacStbbdev
89549e08aacStbbdev // Construction with copying iteration range and given allocator instance
89649e08aacStbbdev template <typename I>
89749e08aacStbbdev concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() )
89849e08aacStbbdev : concurrent_hash_map(a)
89949e08aacStbbdev {
90049e08aacStbbdev try_call( [&] {
90149e08aacStbbdev internal_copy(first, last, std::distance(first, last));
90249e08aacStbbdev }).on_exception( [&] {
90349e08aacStbbdev this->clear();
90449e08aacStbbdev });
90549e08aacStbbdev }
90649e08aacStbbdev
90749e08aacStbbdev template <typename I>
908d86ed7fbStbbdev concurrent_hash_map( I first, I last, const hash_compare_type& compare, const allocator_type& a = allocator_type() )
90949e08aacStbbdev : concurrent_hash_map(compare, a)
91049e08aacStbbdev {
91149e08aacStbbdev try_call( [&] {
91249e08aacStbbdev internal_copy(first, last, std::distance(first, last));
91349e08aacStbbdev }).on_exception( [&] {
91449e08aacStbbdev this->clear();
91549e08aacStbbdev });
91649e08aacStbbdev }
91749e08aacStbbdev
918d86ed7fbStbbdev concurrent_hash_map( std::initializer_list<value_type> il, const hash_compare_type& compare = hash_compare_type(), const allocator_type& a = allocator_type() )
91949e08aacStbbdev : concurrent_hash_map(compare, a)
92049e08aacStbbdev {
92149e08aacStbbdev try_call( [&] {
92249e08aacStbbdev internal_copy(il.begin(), il.end(), il.size());
92349e08aacStbbdev }).on_exception( [&] {
92449e08aacStbbdev this->clear();
92549e08aacStbbdev });
92649e08aacStbbdev }
92749e08aacStbbdev
928d86ed7fbStbbdev concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type& a )
929d86ed7fbStbbdev : concurrent_hash_map(il, hash_compare_type(), a) {}
930d86ed7fbStbbdev
93149e08aacStbbdev // Assignment
93249e08aacStbbdev concurrent_hash_map& operator=( const concurrent_hash_map &table ) {
93349e08aacStbbdev if( this != &table ) {
93449e08aacStbbdev clear();
93549e08aacStbbdev copy_assign_allocators(this->my_allocator, table.my_allocator);
93649e08aacStbbdev internal_copy(table);
93749e08aacStbbdev }
93849e08aacStbbdev return *this;
93949e08aacStbbdev }
94049e08aacStbbdev
94149e08aacStbbdev // Move Assignment
94249e08aacStbbdev concurrent_hash_map& operator=( concurrent_hash_map &&table ) {
94349e08aacStbbdev if( this != &table ) {
94449e08aacStbbdev using pocma_type = typename node_allocator_traits::propagate_on_container_move_assignment;
94549e08aacStbbdev using is_equal_type = typename node_allocator_traits::is_always_equal;
94649e08aacStbbdev move_assign_allocators(this->my_allocator, table.my_allocator);
94749e08aacStbbdev internal_move_assign(std::move(table), tbb::detail::disjunction<is_equal_type, pocma_type>());
94849e08aacStbbdev }
94949e08aacStbbdev return *this;
95049e08aacStbbdev }
95149e08aacStbbdev
95249e08aacStbbdev // Assignment
95349e08aacStbbdev concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {
95449e08aacStbbdev clear();
95549e08aacStbbdev internal_copy(il.begin(), il.end(), il.size());
95649e08aacStbbdev return *this;
95749e08aacStbbdev }
95849e08aacStbbdev
95949e08aacStbbdev // Rehashes and optionally resizes the whole table.
96049e08aacStbbdev /** Useful to optimize performance before or after concurrent operations.
96149e08aacStbbdev Also enables using of find() and count() concurrent methods in serial context. */
96249e08aacStbbdev void rehash(size_type sz = 0) {
96349e08aacStbbdev this->reserve(sz); // TODO: add reduction of number of buckets as well
96449e08aacStbbdev hashcode_type mask = this->my_mask.load(std::memory_order_relaxed);
96549e08aacStbbdev hashcode_type b = (mask+1)>>1; // size or first index of the last segment
96649e08aacStbbdev __TBB_ASSERT((b&(b-1))==0, nullptr); // zero or power of 2
96749e08aacStbbdev bucket *bp = this->get_bucket( b ); // only the last segment should be scanned for rehashing
96849e08aacStbbdev for(; b <= mask; b++, bp++ ) {
96949e08aacStbbdev node_base *n = bp->node_list.load(std::memory_order_relaxed);
9709e15720bStbbdev __TBB_ASSERT( this->is_valid(n) || empty_rehashed(n) || rehash_required(n), "Broken internal structure" );
97149e08aacStbbdev __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
9729e15720bStbbdev if (rehash_required(n)) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
97349e08aacStbbdev hashcode_type h = b; bucket *b_old = bp;
97449e08aacStbbdev do {
97549e08aacStbbdev __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
976ec39c546SAlex hashcode_type m = ( hashcode_type(1) << tbb::detail::log2( h ) ) - 1; // get parent mask from the topmost bit
97749e08aacStbbdev b_old = this->get_bucket( h &= m );
9789e15720bStbbdev } while( rehash_required(b_old->node_list.load(std::memory_order_relaxed)) );
97949e08aacStbbdev // now h - is index of the root rehashed bucket b_old
98049e08aacStbbdev this->mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
98149e08aacStbbdev node_base* prev = nullptr;
98249e08aacStbbdev node_base* curr = b_old->node_list.load(std::memory_order_relaxed);
98349e08aacStbbdev while (this->is_valid(curr)) {
98449e08aacStbbdev hashcode_type curr_node_hash = my_hash_compare.hash(static_cast<node*>(curr)->value().first);
98549e08aacStbbdev
98649e08aacStbbdev if ((curr_node_hash & mask) != h) { // should be rehashed
98749e08aacStbbdev node_base* next = curr->next;
98849e08aacStbbdev // exclude from b_old
98949e08aacStbbdev if (prev == nullptr) {
99049e08aacStbbdev b_old->node_list.store(curr->next, std::memory_order_relaxed);
99149e08aacStbbdev } else {
99249e08aacStbbdev prev->next = curr->next;
99349e08aacStbbdev }
99449e08aacStbbdev bucket *b_new = this->get_bucket(curr_node_hash & mask);
9959e15720bStbbdev __TBB_ASSERT(!rehash_required(b_new->node_list.load(std::memory_order_relaxed)), "hash() function changed for key in table or internal error");
99649e08aacStbbdev this->add_to_bucket(b_new, curr);
99749e08aacStbbdev curr = next;
99849e08aacStbbdev } else {
99949e08aacStbbdev prev = curr;
100049e08aacStbbdev curr = curr->next;
100149e08aacStbbdev }
100249e08aacStbbdev }
100349e08aacStbbdev }
100449e08aacStbbdev }
100549e08aacStbbdev }
100649e08aacStbbdev
100749e08aacStbbdev // Clear table
100849e08aacStbbdev void clear() {
100949e08aacStbbdev hashcode_type m = this->my_mask.load(std::memory_order_relaxed);
101049e08aacStbbdev __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
101149e08aacStbbdev this->my_size.store(0, std::memory_order_relaxed);
101249e08aacStbbdev segment_index_type s = this->segment_index_of( m );
101349e08aacStbbdev __TBB_ASSERT( s+1 == this->pointers_per_table || !this->my_table[s+1].load(std::memory_order_relaxed), "wrong mask or concurrent grow" );
101449e08aacStbbdev do {
101549e08aacStbbdev __TBB_ASSERT(this->is_valid(this->my_table[s].load(std::memory_order_relaxed)), "wrong mask or concurrent grow" );
101649e08aacStbbdev segment_ptr_type buckets_ptr = this->my_table[s].load(std::memory_order_relaxed);
101749e08aacStbbdev size_type sz = this->segment_size( s ? s : 1 );
101849e08aacStbbdev for( segment_index_type i = 0; i < sz; i++ )
101949e08aacStbbdev for( node_base *n = buckets_ptr[i].node_list.load(std::memory_order_relaxed);
102049e08aacStbbdev this->is_valid(n); n = buckets_ptr[i].node_list.load(std::memory_order_relaxed) )
102149e08aacStbbdev {
102249e08aacStbbdev buckets_ptr[i].node_list.store(n->next, std::memory_order_relaxed);
102349e08aacStbbdev delete_node( n );
102449e08aacStbbdev }
102549e08aacStbbdev this->delete_segment(s);
102649e08aacStbbdev } while(s-- > 0);
102749e08aacStbbdev this->my_mask.store(this->embedded_buckets - 1, std::memory_order_relaxed);
102849e08aacStbbdev }
102949e08aacStbbdev
    // Clear table and destroy it.
    ~concurrent_hash_map() { clear(); }

    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    range_type range( size_type grainsize=1 ) {
        return range_type( *this, grainsize );
    }
    const_range_type range( size_type grainsize=1 ) const {
        return const_range_type( *this, grainsize );
    }
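
    // Usage sketch (illustrative only, not part of the interface; `table` and
    // `total` are hypothetical names, and the tbb::parallel_for header is
    // assumed to be included). The ranges above make the whole table
    // splittable, so it can be traversed by TBB parallel algorithms. Traversal
    // takes no per-element locks and must not run concurrently with erasure.
    //
    //     tbb::concurrent_hash_map<int, int> table;
    //     std::atomic<int> total{0};
    //     tbb::parallel_for( table.range(), [&]( const decltype(table)::range_type& r ) {
    //         for( auto it = r.begin(); it != r.end(); ++it )
    //             total.fetch_add( it->second, std::memory_order_relaxed );
    //     } );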

    //------------------------------------------------------------------------
    // STL support - not thread-safe methods
    //------------------------------------------------------------------------
    iterator begin() { return iterator( *this, 0, this->my_embedded_segment, this->my_embedded_segment->node_list.load(std::memory_order_relaxed) ); }
    const_iterator begin() const { return const_iterator( *this, 0, this->my_embedded_segment, this->my_embedded_segment->node_list.load(std::memory_order_relaxed) ); }
    const_iterator cbegin() const { return const_iterator( *this, 0, this->my_embedded_segment, this->my_embedded_segment->node_list.load(std::memory_order_relaxed) ); }
    iterator end() { return iterator( *this, 0, nullptr, nullptr ); }
    const_iterator end() const { return const_iterator( *this, 0, nullptr, nullptr ); }
    const_iterator cend() const { return const_iterator( *this, 0, nullptr, nullptr ); }
    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }

    template <typename K>
    typename std::enable_if<hash_compare_is_transparent<K>::value,
                            std::pair<iterator, iterator>>::type equal_range( const K& key ) {
        return internal_equal_range(key, end());
    }

    template <typename K>
    typename std::enable_if<hash_compare_is_transparent<K>::value,
                            std::pair<const_iterator, const_iterator>>::type equal_range( const K& key ) const {
        return internal_equal_range(key, end());
    }

    // Number of items in table.
    size_type size() const { return this->my_size.load(std::memory_order_acquire); }

    // True if size()==0.
    __TBB_nodiscard bool empty() const { return size() == 0; }

    // Upper bound on size.
    size_type max_size() const {
        return allocator_traits_type::max_size(base_type::get_allocator());
    }

    // Returns the current number of buckets
    size_type bucket_count() const { return this->my_mask.load(std::memory_order_relaxed) + 1; }

    // Return allocator object
    allocator_type get_allocator() const { return base_type::get_allocator(); }

    // Swap two instances. Iterators are invalidated
    void swap(concurrent_hash_map& table) {
        using pocs_type = typename node_allocator_traits::propagate_on_container_swap;
        using is_equal_type = typename node_allocator_traits::is_always_equal;
        swap_allocators(this->my_allocator, table.my_allocator);
        internal_swap(table, tbb::detail::disjunction<pocs_type, is_equal_type>());
    }

    //------------------------------------------------------------------------
    // Concurrent map operations
    //------------------------------------------------------------------------

    // Return count of items (0 or 1)
    size_type count( const Key &key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup</*insert*/false>(key, nullptr, nullptr, /*write=*/false, &do_not_allocate_node);
    }

    template <typename K>
    typename std::enable_if<hash_compare_is_transparent<K>::value,
                            size_type>::type count( const K& key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup</*insert*/false>(key, nullptr, nullptr, /*write=*/false, &do_not_allocate_node);
    }
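
    // Usage sketch (illustrative; assumes a user-defined hash compare type,
    // here hypothetically named StringViewHashCompare, that declares a nested
    // is_transparent type and whose hash()/equal() accept std::string_view,
    // which enables the heterogeneous overload above):
    //
    //     tbb::concurrent_hash_map<std::string, int, StringViewHashCompare> table;
    //     std::string_view k = "apple";
    //     bool present = table.count(k) != 0;   // no temporary std::string is constructed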

    // Find item and acquire a read lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( const_accessor &result, const Key &key ) const {
        result.release();
        return const_cast<concurrent_hash_map*>(this)->lookup</*insert*/false>(key, nullptr, &result, /*write=*/false, &do_not_allocate_node );
    }

    // Find item and acquire a write lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( accessor &result, const Key &key ) {
        result.release();
        return lookup</*insert*/false>(key, nullptr, &result, /*write=*/true, &do_not_allocate_node);
    }

    template <typename K>
    typename std::enable_if<hash_compare_is_transparent<K>::value,
                            bool>::type find( const_accessor& result, const K& key ) {
        result.release();
        return lookup</*insert*/false>(key, nullptr, &result, /*write=*/false, &do_not_allocate_node);
    }

    template <typename K>
    typename std::enable_if<hash_compare_is_transparent<K>::value,
                            bool>::type find( accessor& result, const K& key ) {
        result.release();
        return lookup</*insert*/false>(key, nullptr, &result, /*write=*/true, &do_not_allocate_node);
    }
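
    // Usage sketch (illustrative; `table` is a hypothetical instance). An
    // accessor acts as a smart lock: the element stays locked for as long as
    // the accessor is alive, so keep its scope tight.
    //
    //     tbb::concurrent_hash_map<std::string, int> table;
    //     {
    //         decltype(table)::const_accessor ca;   // read (shared) lock on the element
    //         if( table.find(ca, "apple") ) {
    //             int v = ca->second;               // safe concurrent read
    //             (void)v;
    //         }
    //     }                                         // lock released when `ca` goes out of scope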

    // Insert item (if not already present) and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const Key &key ) {
        result.release();
        return lookup</*insert*/true>(key, nullptr, &result, /*write=*/false, &allocate_node_default_construct<>);
    }

    // Insert item (if not already present) and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const Key &key ) {
        result.release();
        return lookup</*insert*/true>(key, nullptr, &result, /*write=*/true, &allocate_node_default_construct<>);
    }

    template <typename K>
    typename std::enable_if<hash_compare_is_transparent<K>::value &&
                            std::is_constructible<key_type, const K&>::value,
                            bool>::type insert( const_accessor& result, const K& key ) {
        result.release();
        return lookup</*insert*/true>(key, nullptr, &result, /*write=*/false, &allocate_node_default_construct<K>);
    }

    template <typename K>
    typename std::enable_if<hash_compare_is_transparent<K>::value &&
                            std::is_constructible<key_type, const K&>::value,
                            bool>::type insert( accessor& result, const K& key ) {
        result.release();
        return lookup</*insert*/true>(key, nullptr, &result, /*write=*/true, &allocate_node_default_construct<K>);
    }
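
    // Usage sketch (illustrative; `table` is a hypothetical instance).
    // insert() with an accessor implements the classic "insert or update"
    // idiom: for a new key the mapped value is default-constructed, and the
    // element lock is held while the accessor is alive.
    //
    //     tbb::concurrent_hash_map<std::string, int> table;
    //     decltype(table)::accessor a;                // write (exclusive) lock
    //     table.insert(a, "apple");                   // true if inserted, false if already present
    //     ++a->second;                                // safe read-modify-write under the element lock
    //     a.release();                                // or simply let `a` go out of scope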

    // Insert item by copying if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const value_type &value ) {
        result.release();
        return lookup</*insert*/true>(value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct);
    }

    // Insert item by copying if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const value_type &value ) {
        result.release();
        return lookup</*insert*/true>(value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct);
    }

    // Insert item by copying if there is no such key present already
    /** Returns true if item is inserted. */
    bool insert( const value_type &value ) {
        return lookup</*insert*/true>(value.first, &value.second, nullptr, /*write=*/false, &allocate_node_copy_construct);
    }

    // Insert item by moving if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, value_type && value ) {
        return generic_move_insert(result, std::move(value));
    }

    // Insert item by moving if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, value_type && value ) {
        return generic_move_insert(result, std::move(value));
    }

    // Insert item by moving if there is no such key present already
    /** Returns true if item is inserted. */
    bool insert( value_type && value ) {
        return generic_move_insert(accessor_not_used(), std::move(value));
    }

    // Construct item from the given arguments if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    template <typename... Args>
    bool emplace( const_accessor &result, Args&&... args ) {
        return generic_emplace(result, std::forward<Args>(args)...);
    }

    // Construct item from the given arguments if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    template <typename... Args>
    bool emplace( accessor &result, Args&&... args ) {
        return generic_emplace(result, std::forward<Args>(args)...);
    }

    // Construct item from the given arguments if there is no such key present already
    /** Returns true if item is inserted. */
    template <typename... Args>
    bool emplace( Args&&... args ) {
        return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
    }
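
    // Usage sketch (illustrative; `table` is a hypothetical instance).
    // emplace() forwards its arguments to the constructor of value_type
    // (a std::pair); note that the node is constructed before the lookup and
    // is discarded if the key turns out to be present already, so argument
    // construction is not free on the duplicate path.
    //
    //     tbb::concurrent_hash_map<std::string, std::vector<int>> table;
    //     table.emplace("primes", std::vector<int>{2, 3, 5, 7});   // no accessor: insert only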

    // Insert range [first, last)
    template <typename I>
    void insert( I first, I last ) {
        for ( ; first != last; ++first )
            insert( *first );
    }

    // Insert initializer list
    void insert( std::initializer_list<value_type> il ) {
        insert( il.begin(), il.end() );
    }
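
    // Usage sketch (illustrative; `table` and `v` are hypothetical). Bulk
    // insertion is a plain loop over single-element inserts, so it is safe to
    // call concurrently from several threads, but it is not atomic as a whole.
    //
    //     tbb::concurrent_hash_map<std::string, int> table;
    //     table.insert({ {"a", 1}, {"b", 2}, {"c", 3} });
    //     std::vector<std::pair<std::string, int>> v{ {"d", 4}, {"e", 5} };
    //     table.insert(v.begin(), v.end());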

    // Erase item.
    /** Return true if the item was erased by this call in particular. */
    bool erase( const Key &key ) {
        return internal_erase(key);
    }

    template <typename K>
    typename std::enable_if<hash_compare_is_transparent<K>::value,
                            bool>::type erase( const K& key ) {
        return internal_erase(key);
    }

    // Erase item by const_accessor.
    /** Return true if the item was erased by this call in particular. */
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor );
    }

    // Erase item by accessor.
    /** Return true if the item was erased by this call in particular. */
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor );
    }
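
    // Usage sketch (illustrative; `table` is a hypothetical instance).
    // Erasing through an accessor removes exactly the element the accessor
    // holds and releases the accessor; erasing by key returns false if
    // another thread removed the element first.
    //
    //     tbb::concurrent_hash_map<std::string, int> table;
    //     decltype(table)::accessor a;
    //     if( table.find(a, "apple") && a->second == 0 )
    //         table.erase(a);                    // `a` is released by erase()
    //     bool removed = table.erase("banana");  // by key; false if not present
    //     (void)removed;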

protected:
    template <typename K, typename AllocateNodeType>
    node* allocate_node_helper( const K& key, const T* t, AllocateNodeType allocate_node, std::true_type ) {
        return allocate_node(base_type::get_allocator(), key, t);
    }

    template <typename K, typename AllocateNodeType>
    node* allocate_node_helper( const K&, const T*, AllocateNodeType, std::false_type ) {
        __TBB_ASSERT(false, "allocate_node_helper with std::false_type should never be called");
        return nullptr;
    }

    // Insert or find item and optionally acquire a lock on the item.
    template <bool OpInsert, typename K, typename AllocateNodeType>
    bool lookup( const K &key, const T *t, const_accessor *result, bool write, AllocateNodeType allocate_node, node *tmp_n = nullptr)
    {
        __TBB_ASSERT( !result || !result->my_node, nullptr );
        bool return_value;
        hashcode_type const h = my_hash_compare.hash( key );
        hashcode_type m = this->my_mask.load(std::memory_order_acquire);
        segment_index_type grow_segment = 0;
        node *n;
    restart:
        {//lock scope
            __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
            return_value = false;
            // get bucket
            bucket_accessor b( this, h & m );
            // find a node
            n = search_bucket( key, b() );
            if( OpInsert ) {
                // [opt] insert a key
                if( !n ) {
                    if( !tmp_n ) {
                        tmp_n = allocate_node_helper(key, t, allocate_node, std::integral_constant<bool, OpInsert>{});
                    }
                    while ( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improve insertion
                        // Re-run the search, in case another thread inserted the item during the upgrade
                        n = search_bucket(key, b());
                        if (this->is_valid(n)) { // unfortunately, it did
                            if (!b.downgrade_to_reader()) {
                                // The mutex was released and reacquired during the downgrade.
                                // Re-run the search, in case another thread removed the item in the meantime.
                                n = search_bucket(key, b());
                                if (!this->is_valid(n)) {
                                    // Unfortunately, it did;
                                    // we need to try upgrading to writer again
                                    continue;
                                }
                            }
                            goto exists;
                        }
                    }

                    if( this->check_mask_race(h, m) )
                        goto restart; // b.release() is done in ~b().
                    // insert and set flag to grow the container
                    grow_segment = this->insert_new_node( b(), n = tmp_n, m );
                    tmp_n = nullptr;
                    return_value = true;
                }
            } else { // find or count
                if( !n ) {
                    if( this->check_mask_race( h, m ) )
                        goto restart; // b.release() is done in ~b(). TODO: replace by continue
                    return false;
                }
                return_value = true;
            }
    exists:
            if( !result ) goto check_growth;
            // TODO: the following looks like a generic/regular operation
            // acquire the item
            if( !result->try_acquire( n->mutex, write ) ) {
                for( tbb::detail::atomic_backoff backoff(true);; ) {
                    if( result->try_acquire( n->mutex, write ) ) break;
                    if( !backoff.bounded_pause() ) {
                        // the wait is taking too long; restart the operation
                        b.release();
                        __TBB_ASSERT( !OpInsert || !return_value, "Can't acquire new item in locked bucket?" );
                        yield();
                        m = this->my_mask.load(std::memory_order_acquire);
                        goto restart;
                    }
                }
            }
        }//lock scope
        result->my_node = n;
        result->my_hash = h;
    check_growth:
        // [opt] grow the container
        if( grow_segment ) {
            this->enable_segment( grow_segment );
        }
        if( tmp_n ) // if OpInsert only
            delete_node( tmp_n );
        return return_value;
    }

    struct accessor_not_used { void release() {} };
    friend const_accessor* accessor_location( accessor_not_used const& ) { return nullptr; }
    friend const_accessor* accessor_location( const_accessor& a ) { return &a; }

    friend bool is_write_access_needed( accessor const& ) { return true; }
    friend bool is_write_access_needed( const_accessor const& ) { return false; }
    friend bool is_write_access_needed( accessor_not_used const& ) { return false; }

    template <typename Accessor>
    bool generic_move_insert( Accessor && result, value_type && value ) {
        result.release();
        return lookup</*insert*/true>(value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct);
    }

    template <typename Accessor, typename... Args>
    bool generic_emplace( Accessor && result, Args &&... args ) {
        result.release();
        node * node_ptr = create_node(base_type::get_allocator(), std::forward<Args>(args)...);
        return lookup</*insert*/true>(node_ptr->value().first, nullptr, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr);
    }

    // delete item by accessor
    bool exclude( const_accessor &item_accessor ) {
        __TBB_ASSERT( item_accessor.my_node, nullptr );
        node_base *const exclude_node = item_accessor.my_node;
        hashcode_type const hash = item_accessor.my_hash;
        hashcode_type mask = this->my_mask.load(std::memory_order_acquire);
        do {
            // get bucket
            bucket_accessor b( this, hash & mask, /*writer=*/true );
            node_base* prev = nullptr;
            node_base* curr = b()->node_list.load(std::memory_order_relaxed);

            while (curr && curr != exclude_node) {
                prev = curr;
                curr = curr->next;
            }

            if (curr == nullptr) { // someone else was first
                if (this->check_mask_race(hash, mask))
                    continue;
                item_accessor.release();
                return false;
            }
            __TBB_ASSERT( curr == exclude_node, nullptr );
            // remove from container
            if (prev == nullptr) {
                b()->node_list.store(curr->next, std::memory_order_relaxed);
            } else {
                prev->next = curr->next;
            }

            this->my_size--;
            break;
        } while(true);
        if (!item_accessor.is_writer()) { // need to get exclusive lock
            item_accessor.upgrade_to_writer(); // return value means nothing here
        }

        item_accessor.release();
        delete_node(exclude_node); // Only one thread can delete it
        return true;
    }

    template <typename K>
    bool internal_erase( const K& key ) {
        node_base *erase_node;
        hashcode_type const hash = my_hash_compare.hash(key);
        hashcode_type mask = this->my_mask.load(std::memory_order_acquire);
    restart:
        {//lock scope
            // get bucket
            bucket_accessor b( this, hash & mask );
    search:
            node_base* prev = nullptr;
            erase_node = b()->node_list.load(std::memory_order_relaxed);
            while (this->is_valid(erase_node) && !my_hash_compare.equal(key, static_cast<node*>(erase_node)->value().first ) ) {
                prev = erase_node;
                erase_node = erase_node->next;
            }

            if (erase_node == nullptr) { // not found, but the mask could have changed
                if (this->check_mask_race(hash, mask))
                    goto restart;
                return false;
            } else if (!b.is_writer() && !b.upgrade_to_writer()) {
                if (this->check_mask_race(hash, mask)) // contended upgrade, check mask
                    goto restart;
                goto search;
            }

            // remove from container
            if (prev == nullptr) {
                b()->node_list.store(erase_node->next, std::memory_order_relaxed);
            } else {
                prev->next = erase_node->next;
            }
            this->my_size--;
        }
        {
            typename node::scoped_type item_locker( erase_node->mutex, /*write=*/true );
        }
        // note: once the node is unlinked, no thread should attempt to acquire this mutex again; do not try to upgrade const_accessor!
        delete_node(erase_node); // Only one thread can delete it due to the write lock on the bucket
        return true;
    }

    // Returns a pair of iterators delimiting the item with the given key, or the empty range [end_, end_) if no such item exists
    template <typename K, typename I>
    std::pair<I, I> internal_equal_range( const K& key, I end_ ) const {
        hashcode_type h = my_hash_compare.hash( key );
        hashcode_type m = this->my_mask.load(std::memory_order_relaxed);
        __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
        h &= m;
        bucket *b = this->get_bucket( h );
        while (rehash_required(b->node_list.load(std::memory_order_relaxed))) {
            m = ( hashcode_type(1) << tbb::detail::log2( h ) ) - 1; // get parent mask from the topmost bit
            b = this->get_bucket( h &= m );
        }
        node *n = search_bucket( key, b );
        if( !n )
            return std::make_pair(end_, end_);
        iterator lower(*this, h, b, n), upper(lower);
        return std::make_pair(lower, ++upper);
    }

    // Copy "source" to *this, where *this must start out empty.
    void internal_copy( const concurrent_hash_map& source ) {
        hashcode_type mask = source.my_mask.load(std::memory_order_relaxed);
        if( this->my_mask.load(std::memory_order_relaxed) == mask ) { // optimized version
            this->reserve(source.my_size.load(std::memory_order_relaxed)); // TODO: load_factor?
            bucket *dst = nullptr, *src = nullptr;
            bool rehashing_required = false;
            for( hashcode_type k = 0; k <= mask; k++ ) {
                if( k & (k-2) ) { ++dst; ++src; } // not the beginning of a segment
                else { dst = this->get_bucket( k ); src = source.get_bucket( k ); }
                __TBB_ASSERT(!rehash_required(dst->node_list.load(std::memory_order_relaxed)), "Invalid bucket in destination table");
                node *n = static_cast<node*>( src->node_list.load(std::memory_order_relaxed) );
                if (rehash_required(n)) { // source is not rehashed, items are in previous buckets
                    rehashing_required = true;
                    dst->node_list.store(reinterpret_cast<node_base*>(rehash_req_flag), std::memory_order_relaxed);
                } else for(; n; n = static_cast<node*>( n->next ) ) {
                    node* node_ptr = create_node(base_type::get_allocator(), n->value().first, n->value().second);
                    this->add_to_bucket( dst, node_ptr);
                    this->my_size.fetch_add(1, std::memory_order_relaxed);
                }
            }
            if( rehashing_required ) rehash();
        } else internal_copy(source.begin(), source.end(), source.my_size.load(std::memory_order_relaxed));
    }

    template <typename I>
    void internal_copy( I first, I last, size_type reserve_size ) {
        this->reserve(reserve_size); // TODO: load_factor?
        hashcode_type m = this->my_mask.load(std::memory_order_relaxed);
        for(; first != last; ++first) {
            hashcode_type h = my_hash_compare.hash( (*first).first );
            bucket *b = this->get_bucket( h & m );
            __TBB_ASSERT(!rehash_required(b->node_list.load(std::memory_order_relaxed)), "Invalid bucket in destination table");
            node* node_ptr = create_node(base_type::get_allocator(), (*first).first, (*first).second);
            this->add_to_bucket( b, node_ptr );
            ++this->my_size; // TODO: replace by non-atomic op
        }
    }

    void internal_move_construct_with_allocator( concurrent_hash_map&& other, const allocator_type&,
                                                 /*is_always_equal=*/std::true_type )
    {
        this->internal_move(std::move(other));
    }

    void internal_move_construct_with_allocator( concurrent_hash_map&& other, const allocator_type& a,
                                                 /*is_always_equal=*/std::false_type )
    {
        if (a == other.get_allocator()){
            this->internal_move(std::move(other));
        } else {
            try_call( [&] {
                internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()),
                              other.size());
            }).on_exception( [&] {
                this->clear();
            });
        }
    }

    void internal_move_assign( concurrent_hash_map&& other,
                               /*is_always_equal || POCMA = */std::true_type)
    {
        this->internal_move(std::move(other));
    }

    void internal_move_assign(concurrent_hash_map&& other, /*is_always_equal=*/ std::false_type) {
        if (this->my_allocator == other.my_allocator) {
            this->internal_move(std::move(other));
        } else {
            // do per-element move
            internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()),
                          other.size());
        }
    }

    void internal_swap(concurrent_hash_map& other, /*is_always_equal || POCS = */ std::true_type) {
        this->internal_swap_content(other);
    }

    void internal_swap(concurrent_hash_map& other, /*is_always_equal || POCS = */ std::false_type) {
        __TBB_ASSERT(this->my_allocator == other.my_allocator, nullptr);
        this->internal_swap_content(other);
    }

    // Fast find when no concurrent erasure is used. For internal use inside TBB only!
    /** Return pointer to item with given key, or nullptr if no such item exists.
        Must not be called concurrently with erasure operations. */
    const_pointer internal_fast_find( const Key& key ) const {
        hashcode_type h = my_hash_compare.hash( key );
        hashcode_type m = this->my_mask.load(std::memory_order_acquire);
        node *n;
    restart:
        __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
        bucket *b = this->get_bucket( h & m );
        // TODO: actually, notification is unnecessary here, just hiding double-check
        if (rehash_required(b->node_list.load(std::memory_order_acquire)))
        {
            typename bucket::scoped_type lock;
            if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
                if (rehash_required(b->node_list.load(std::memory_order_relaxed)))
                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); // recursive rehashing
            }
            else lock.acquire( b->mutex, /*write=*/false );
            __TBB_ASSERT(!rehash_required(b->node_list.load(std::memory_order_relaxed)), nullptr);
        }
        n = search_bucket( key, b );
        if( n )
            return n->storage();
        else if( this->check_mask_race( h, m ) )
            goto restart;
        return nullptr;
    }
};

#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
template <typename It,
          typename HashCompare = d1::tbb_hash_compare<iterator_key_t<It>>,
          typename Alloc = tbb_allocator<iterator_alloc_pair_t<It>>,
          typename = std::enable_if_t<is_input_iterator_v<It>>,
          typename = std::enable_if_t<is_allocator_v<Alloc>>,
          typename = std::enable_if_t<!is_allocator_v<HashCompare>>>
concurrent_hash_map( It, It, HashCompare = HashCompare(), Alloc = Alloc() )
-> concurrent_hash_map<iterator_key_t<It>, iterator_mapped_t<It>, HashCompare, Alloc>;

template <typename It, typename Alloc,
          typename = std::enable_if_t<is_input_iterator_v<It>>,
          typename = std::enable_if_t<is_allocator_v<Alloc>>>
concurrent_hash_map( It, It, Alloc )
-> concurrent_hash_map<iterator_key_t<It>, iterator_mapped_t<It>, d1::tbb_hash_compare<iterator_key_t<It>>, Alloc>;

template <typename Key, typename T,
          typename HashCompare = d1::tbb_hash_compare<std::remove_const_t<Key>>,
          typename Alloc = tbb_allocator<std::pair<const Key, T>>,
          typename = std::enable_if_t<is_allocator_v<Alloc>>,
          typename = std::enable_if_t<!is_allocator_v<HashCompare>>>
concurrent_hash_map( std::initializer_list<std::pair<Key, T>>, HashCompare = HashCompare(), Alloc = Alloc() )
-> concurrent_hash_map<std::remove_const_t<Key>, T, HashCompare, Alloc>;

template <typename Key, typename T, typename Alloc,
          typename = std::enable_if_t<is_allocator_v<Alloc>>>
concurrent_hash_map( std::initializer_list<std::pair<Key, T>>, Alloc )
-> concurrent_hash_map<std::remove_const_t<Key>, T, d1::tbb_hash_compare<std::remove_const_t<Key>>, Alloc>;

#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
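
// Usage sketch (illustrative; `v`, `m1`, and `m2` are hypothetical names):
// with the guides above, the template arguments can be deduced from an
// iterator pair or from a braced list of pairs.
//
//     std::vector<std::pair<std::string, int>> v{ {"a", 1}, {"b", 2} };
//     tbb::concurrent_hash_map m1(v.begin(), v.end());            // deduces <std::string, int>
//     tbb::concurrent_hash_map m2{ std::pair{std::string{"c"}, 3} }; // also <std::string, int>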

template <typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) ) return false;
    }
    return true;
}

#if !__TBB_CPP20_COMPARISONS_PRESENT
template <typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{ return !(a == b); }
#endif // !__TBB_CPP20_COMPARISONS_PRESENT

template <typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{ a.swap( b ); }

} // namespace d2
} // namespace detail

inline namespace v1 {
using detail::split;
using detail::d2::concurrent_hash_map;
using detail::d1::tbb_hash_compare;
} // namespace v1

} // namespace tbb

#endif /* __TBB_concurrent_hash_map_H */