1 /*
2     Copyright (c) 2005-2023 Intel Corporation
3 
4     Licensed under the Apache License, Version 2.0 (the "License");
5     you may not use this file except in compliance with the License.
6     You may obtain a copy of the License at
7 
8         http://www.apache.org/licenses/LICENSE-2.0
9 
10     Unless required by applicable law or agreed to in writing, software
11     distributed under the License is distributed on an "AS IS" BASIS,
12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13     See the License for the specific language governing permissions and
14     limitations under the License.
15 */
16 
17 #ifndef __TBB_detail__concurrent_unordered_base_H
18 #define __TBB_detail__concurrent_unordered_base_H
19 
20 #if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H)
21 #error Do not #include this internal file directly; use public TBB headers instead.
22 #endif
23 
24 #include "_range_common.h"
25 #include "_containers_helpers.h"
26 #include "_segment_table.h"
27 #include "_hash_compare.h"
28 #include "_allocator_traits.h"
29 #include "_node_handle.h"
30 #include "_assert.h"
31 #include "_utils.h"
32 #include "_exception.h"
33 #include <iterator>
34 #include <utility>
35 #include <functional>
36 #include <initializer_list>
37 #include <atomic>
38 #include <type_traits>
39 #include <memory>
40 #include <algorithm>
41 
42 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
43 #pragma warning(push)
44 #pragma warning(disable: 4127) // warning C4127: conditional expression is constant
45 #endif
46 
47 namespace tbb {
48 namespace detail {
49 namespace d1 {
50 
51 template <typename Traits>
52 class concurrent_unordered_base;
53 
54 template<typename Container, typename Value>
55 class solist_iterator {
56 private:
57     using node_ptr = typename Container::value_node_ptr;
58     template <typename T, typename Allocator>
59     friend class split_ordered_list;
60     template<typename M, typename V>
61     friend class solist_iterator;
62     template <typename Traits>
63     friend class concurrent_unordered_base;
64     template<typename M, typename T, typename U>
65     friend bool operator==( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
66     template<typename M, typename T, typename U>
67     friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
68 public:
69     using value_type = Value;
70     using difference_type = typename Container::difference_type;
71     using pointer = value_type*;
72     using reference = value_type&;
73     using iterator_category = std::forward_iterator_tag;
74 
solist_iterator()75     solist_iterator() : my_node_ptr(nullptr) {}
solist_iterator(const solist_iterator<Container,typename Container::value_type> & other)76     solist_iterator( const solist_iterator<Container, typename Container::value_type>& other )
77         : my_node_ptr(other.my_node_ptr) {}
78 
79     solist_iterator& operator=( const solist_iterator<Container, typename Container::value_type>& other ) {
80         my_node_ptr = other.my_node_ptr;
81         return *this;
82     }
83 
84     reference operator*() const {
85         return my_node_ptr->value();
86     }
87 
88     pointer operator->() const {
89         return my_node_ptr->storage();
90     }
91 
92     solist_iterator& operator++() {
93         auto next_node = my_node_ptr->next();
94         while(next_node && next_node->is_dummy()) {
95             next_node = next_node->next();
96         }
97         my_node_ptr = static_cast<node_ptr>(next_node);
98         return *this;
99     }
100 
101     solist_iterator operator++(int) {
102         solist_iterator tmp = *this;
103         ++*this;
104         return tmp;
105     }
106 
107 private:
solist_iterator(node_ptr pnode)108     solist_iterator( node_ptr pnode ) : my_node_ptr(pnode) {}
109 
get_node_ptr()110     node_ptr get_node_ptr() const { return my_node_ptr; }
111 
112     node_ptr my_node_ptr;
113 };
114 
115 template<typename Solist, typename T, typename U>
116 bool operator==( const solist_iterator<Solist, T>& i, const solist_iterator<Solist, U>& j ) {
117     return i.my_node_ptr == j.my_node_ptr;
118 }
119 
120 template<typename Solist, typename T, typename U>
121 bool operator!=( const solist_iterator<Solist, T>& i, const solist_iterator<Solist, U>& j ) {
122     return i.my_node_ptr != j.my_node_ptr;
123 }
124 
125 template <typename SokeyType>
126 class list_node {
127 public:
128     using node_ptr = list_node*;
129     using sokey_type = SokeyType;
130 
list_node(sokey_type key)131     list_node(sokey_type key) : my_next(nullptr), my_order_key(key) {}
132 
init(sokey_type key)133     void init( sokey_type key ) {
134         my_order_key = key;
135     }
136 
order_key()137     sokey_type order_key() const {
138         return my_order_key;
139     }
140 
is_dummy()141     bool is_dummy() {
142         // The last bit of order key is unset for dummy nodes
143         return (my_order_key & 0x1) == 0;
144     }
145 
next()146     node_ptr next() const {
147         return my_next.load(std::memory_order_acquire);
148     }
149 
set_next(node_ptr next_node)150     void set_next( node_ptr next_node ) {
151         my_next.store(next_node, std::memory_order_release);
152     }
153 
try_set_next(node_ptr expected_next,node_ptr new_next)154     bool try_set_next( node_ptr expected_next, node_ptr new_next ) {
155         return my_next.compare_exchange_strong(expected_next, new_next);
156     }
157 
158 private:
159     std::atomic<node_ptr> my_next;
160     sokey_type my_order_key;
161 }; // class list_node
162 
163 template <typename ValueType, typename SokeyType>
164 class value_node : public list_node<SokeyType>
165 {
166 public:
167     using base_type = list_node<SokeyType>;
168     using sokey_type = typename base_type::sokey_type;
169     using value_type = ValueType;
170 
value_node(sokey_type ord_key)171     value_node( sokey_type ord_key ) : base_type(ord_key) {}
~value_node()172     ~value_node() {}
storage()173     value_type* storage() {
174         return reinterpret_cast<value_type*>(&my_value);
175     }
176 
value()177     value_type& value() {
178         return *storage();
179     }
180 
181 private:
182     using aligned_storage_type = typename std::aligned_storage<sizeof(value_type)>::type;
183     aligned_storage_type my_value;
184 }; // class value_node
185 
186 template <typename Traits>
187 class concurrent_unordered_base {
188     using self_type = concurrent_unordered_base<Traits>;
189     using traits_type = Traits;
190     using hash_compare_type = typename traits_type::hash_compare_type;
191     class unordered_segment_table;
192 public:
193     using value_type = typename traits_type::value_type;
194     using key_type = typename traits_type::key_type;
195     using allocator_type = typename traits_type::allocator_type;
196 
197 private:
198     using allocator_traits_type = tbb::detail::allocator_traits<allocator_type>;
199     // TODO: check assert conditions for different C++ standards
200     static_assert(std::is_same<typename allocator_traits_type::value_type, value_type>::value,
201                   "value_type of the container must be the same as its allocator");
202     using sokey_type = std::size_t;
203 
204 public:
205     using size_type = std::size_t;
206     using difference_type = std::ptrdiff_t;
207 
208     using iterator = solist_iterator<self_type, value_type>;
209     using const_iterator = solist_iterator<self_type, const value_type>;
210     using local_iterator = iterator;
211     using const_local_iterator = const_iterator;
212 
213     using reference = value_type&;
214     using const_reference = const value_type&;
215     using pointer = typename allocator_traits_type::pointer;
216     using const_pointer = typename allocator_traits_type::const_pointer;
217 
218     using hasher = typename hash_compare_type::hasher;
219     using key_equal = typename hash_compare_type::key_equal;
220 
221 private:
222     using list_node_type = list_node<sokey_type>;
223     using value_node_type = value_node<value_type, sokey_type>;
224     using node_ptr = list_node_type*;
225     using value_node_ptr = value_node_type*;
226 
227     using value_node_allocator_type = typename allocator_traits_type::template rebind_alloc<value_node_type>;
228     using node_allocator_type = typename allocator_traits_type::template rebind_alloc<list_node_type>;
229 
230     using node_allocator_traits = tbb::detail::allocator_traits<node_allocator_type>;
231     using value_node_allocator_traits = tbb::detail::allocator_traits<value_node_allocator_type>;
232 
round_up_to_power_of_two(size_type bucket_count)233     static constexpr size_type round_up_to_power_of_two( size_type bucket_count ) {
234         return size_type(1) << size_type(tbb::detail::log2(uintptr_t(bucket_count == 0 ? 1 : bucket_count) * 2 - 1));
235     }
236 
237     template <typename T>
238     using is_transparent = dependent_bool<has_transparent_key_equal<key_type, hasher, key_equal>, T>;
239 public:
240     using node_type = node_handle<key_type, value_type, value_node_type, allocator_type>;
241 
242     explicit concurrent_unordered_base( size_type bucket_count, const hasher& hash = hasher(),
243                                         const key_equal& equal = key_equal(), const allocator_type& alloc = allocator_type() )
244         : my_size(0),
245           my_bucket_count(round_up_to_power_of_two(bucket_count)),
246           my_max_load_factor(float(initial_max_load_factor)),
247           my_hash_compare(hash, equal),
248           my_head(sokey_type(0)),
249           my_segments(alloc) {}
250 
concurrent_unordered_base()251     concurrent_unordered_base() : concurrent_unordered_base(initial_bucket_count) {}
252 
concurrent_unordered_base(size_type bucket_count,const allocator_type & alloc)253     concurrent_unordered_base( size_type bucket_count, const allocator_type& alloc )
254         : concurrent_unordered_base(bucket_count, hasher(), key_equal(), alloc) {}
255 
concurrent_unordered_base(size_type bucket_count,const hasher & hash,const allocator_type & alloc)256     concurrent_unordered_base( size_type bucket_count, const hasher& hash, const allocator_type& alloc )
257         : concurrent_unordered_base(bucket_count, hash, key_equal(), alloc) {}
258 
concurrent_unordered_base(const allocator_type & alloc)259     explicit concurrent_unordered_base( const allocator_type& alloc )
260         : concurrent_unordered_base(initial_bucket_count, hasher(), key_equal(), alloc) {}
261 
262     template <typename InputIterator>
263     concurrent_unordered_base( InputIterator first, InputIterator last,
264                                size_type bucket_count = initial_bucket_count, const hasher& hash = hasher(),
265                                const key_equal& equal = key_equal(), const allocator_type& alloc = allocator_type() )
concurrent_unordered_base(bucket_count,hash,equal,alloc)266         : concurrent_unordered_base(bucket_count, hash, equal, alloc)
267     {
268         insert(first, last);
269     }
270 
271     template <typename InputIterator>
concurrent_unordered_base(InputIterator first,InputIterator last,size_type bucket_count,const allocator_type & alloc)272     concurrent_unordered_base( InputIterator first, InputIterator last,
273                                size_type bucket_count, const allocator_type& alloc )
274         : concurrent_unordered_base(first, last, bucket_count, hasher(), key_equal(), alloc) {}
275 
276     template <typename InputIterator>
concurrent_unordered_base(InputIterator first,InputIterator last,size_type bucket_count,const hasher & hash,const allocator_type & alloc)277     concurrent_unordered_base( InputIterator first, InputIterator last,
278                                size_type bucket_count, const hasher& hash, const allocator_type& alloc )
279         : concurrent_unordered_base(first, last, bucket_count, hash, key_equal(), alloc) {}
280 
concurrent_unordered_base(const concurrent_unordered_base & other)281     concurrent_unordered_base( const concurrent_unordered_base& other )
282         : my_size(other.my_size.load(std::memory_order_relaxed)),
283           my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)),
284           my_max_load_factor(other.my_max_load_factor),
285           my_hash_compare(other.my_hash_compare),
286           my_head(other.my_head.order_key()),
287           my_segments(other.my_segments)
288     {
289         try_call( [&] {
290             internal_copy(other);
291         } ).on_exception( [&] {
292             clear();
293         });
294     }
295 
concurrent_unordered_base(const concurrent_unordered_base & other,const allocator_type & alloc)296     concurrent_unordered_base( const concurrent_unordered_base& other, const allocator_type& alloc )
297         : my_size(other.my_size.load(std::memory_order_relaxed)),
298           my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)),
299           my_max_load_factor(other.my_max_load_factor),
300           my_hash_compare(other.my_hash_compare),
301           my_head(other.my_head.order_key()),
302           my_segments(other.my_segments, alloc)
303     {
304         try_call( [&] {
305             internal_copy(other);
306         } ).on_exception( [&] {
307             clear();
308         });
309     }
310 
concurrent_unordered_base(concurrent_unordered_base && other)311     concurrent_unordered_base( concurrent_unordered_base&& other )
312         : my_size(other.my_size.load(std::memory_order_relaxed)),
313           my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)),
314           my_max_load_factor(std::move(other.my_max_load_factor)),
315           my_hash_compare(std::move(other.my_hash_compare)),
316           my_head(other.my_head.order_key()),
317           my_segments(std::move(other.my_segments))
318     {
319         move_content(std::move(other));
320     }
321 
concurrent_unordered_base(concurrent_unordered_base && other,const allocator_type & alloc)322     concurrent_unordered_base( concurrent_unordered_base&& other, const allocator_type& alloc )
323         : my_size(other.my_size.load(std::memory_order_relaxed)),
324           my_bucket_count(other.my_bucket_count.load(std::memory_order_relaxed)),
325           my_max_load_factor(std::move(other.my_max_load_factor)),
326           my_hash_compare(std::move(other.my_hash_compare)),
327           my_head(other.my_head.order_key()),
328           my_segments(std::move(other.my_segments), alloc)
329     {
330         using is_always_equal = typename allocator_traits_type::is_always_equal;
331         internal_move_construct_with_allocator(std::move(other), alloc, is_always_equal());
332     }
333 
334     concurrent_unordered_base( std::initializer_list<value_type> init,
335                                size_type bucket_count = initial_bucket_count,
336                                const hasher& hash = hasher(), const key_equal& equal = key_equal(),
337                                const allocator_type& alloc = allocator_type() )
338         : concurrent_unordered_base(init.begin(), init.end(), bucket_count, hash, equal, alloc) {}
339 
concurrent_unordered_base(std::initializer_list<value_type> init,size_type bucket_count,const allocator_type & alloc)340     concurrent_unordered_base( std::initializer_list<value_type> init,
341                                size_type bucket_count, const allocator_type& alloc )
342         : concurrent_unordered_base(init, bucket_count, hasher(), key_equal(), alloc) {}
343 
concurrent_unordered_base(std::initializer_list<value_type> init,size_type bucket_count,const hasher & hash,const allocator_type & alloc)344     concurrent_unordered_base( std::initializer_list<value_type> init,
345                                size_type bucket_count, const hasher& hash, const allocator_type& alloc )
346         : concurrent_unordered_base(init, bucket_count, hash, key_equal(), alloc) {}
347 
~concurrent_unordered_base()348     ~concurrent_unordered_base() {
349         internal_clear();
350     }
351 
352     concurrent_unordered_base& operator=( const concurrent_unordered_base& other ) {
353         if (this != &other) {
354             clear();
355             my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
356             my_bucket_count.store(other.my_bucket_count.load(std::memory_order_relaxed), std::memory_order_relaxed);
357             my_max_load_factor = other.my_max_load_factor;
358             my_hash_compare = other.my_hash_compare;
359             my_segments = other.my_segments;
360             internal_copy(other); // TODO: guards for exceptions?
361         }
362         return *this;
363     }
364 
noexcept(unordered_segment_table::is_noexcept_assignment)365     concurrent_unordered_base& operator=( concurrent_unordered_base&& other ) noexcept(unordered_segment_table::is_noexcept_assignment) {
366         if (this != &other) {
367             clear();
368             my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
369             my_bucket_count.store(other.my_bucket_count.load(std::memory_order_relaxed), std::memory_order_relaxed);
370             my_max_load_factor = std::move(other.my_max_load_factor);
371             my_hash_compare = std::move(other.my_hash_compare);
372             my_segments = std::move(other.my_segments);
373 
374             using pocma_type = typename allocator_traits_type::propagate_on_container_move_assignment;
375             using is_always_equal = typename allocator_traits_type::is_always_equal;
376             internal_move_assign(std::move(other), tbb::detail::disjunction<pocma_type, is_always_equal>());
377         }
378         return *this;
379     }
380 
381     concurrent_unordered_base& operator=( std::initializer_list<value_type> init ) {
382         clear();
383         insert(init);
384         return *this;
385     }
386 
swap(concurrent_unordered_base & other)387     void swap( concurrent_unordered_base& other ) noexcept(unordered_segment_table::is_noexcept_swap) {
388         if (this != &other) {
389             using pocs_type = typename allocator_traits_type::propagate_on_container_swap;
390             using is_always_equal = typename allocator_traits_type::is_always_equal;
391             internal_swap(other, tbb::detail::disjunction<pocs_type, is_always_equal>());
392         }
393     }
394 
get_allocator()395     allocator_type get_allocator() const noexcept { return my_segments.get_allocator(); }
396 
begin()397     iterator begin() noexcept { return iterator(first_value_node(&my_head)); }
begin()398     const_iterator begin() const noexcept { return const_iterator(first_value_node(const_cast<node_ptr>(&my_head))); }
cbegin()399     const_iterator cbegin() const noexcept { return const_iterator(first_value_node(const_cast<node_ptr>(&my_head))); }
400 
end()401     iterator end() noexcept { return iterator(nullptr); }
end()402     const_iterator end() const noexcept { return const_iterator(nullptr); }
cend()403     const_iterator cend() const noexcept { return const_iterator(nullptr); }
404 
empty()405     __TBB_nodiscard bool empty() const noexcept { return size() == 0; }
size()406     size_type size() const noexcept { return my_size.load(std::memory_order_relaxed); }
max_size()407     size_type max_size() const noexcept { return allocator_traits_type::max_size(get_allocator()); }
408 
clear()409     void clear() noexcept {
410         internal_clear();
411     }
412 
insert(const value_type & value)413     std::pair<iterator, bool> insert( const value_type& value ) {
414         return internal_insert_value(value);
415     }
416 
insert(value_type && value)417     std::pair<iterator, bool> insert( value_type&& value ) {
418         return internal_insert_value(std::move(value));
419     }
420 
insert(const_iterator,const value_type & value)421     iterator insert( const_iterator, const value_type& value ) {
422         // Ignore hint
423         return insert(value).first;
424     }
425 
insert(const_iterator,value_type && value)426     iterator insert( const_iterator, value_type&& value ) {
427         // Ignore hint
428         return insert(std::move(value)).first;
429     }
430 
431     template <typename InputIterator>
insert(InputIterator first,InputIterator last)432     void insert( InputIterator first, InputIterator last ) {
433         for (; first != last; ++first) {
434             insert(*first);
435         }
436     }
437 
insert(std::initializer_list<value_type> init)438     void insert( std::initializer_list<value_type> init ) {
439         insert(init.begin(), init.end());
440     }
441 
insert(node_type && nh)442     std::pair<iterator, bool> insert( node_type&& nh ) {
443         if (!nh.empty()) {
444             value_node_ptr insert_node = node_handle_accessor::get_node_ptr(nh);
445             auto init_node = [&insert_node]( sokey_type order_key )->value_node_ptr {
446                 insert_node->init(order_key);
447                 return insert_node;
448             };
449             auto insert_result = internal_insert(insert_node->value(), init_node);
450             if (insert_result.inserted) {
451                 // If the insertion succeeded - set node handle to the empty state
452                 __TBB_ASSERT(insert_result.remaining_node == nullptr,
453                             "internal_insert_node should not return the remaining node if the insertion succeeded");
454                 node_handle_accessor::deactivate(nh);
455             }
456             return { iterator(insert_result.node_with_equal_key), insert_result.inserted };
457         }
458         return {end(), false};
459     }
460 
insert(const_iterator,node_type && nh)461     iterator insert( const_iterator, node_type&& nh ) {
462         // Ignore hint
463         return insert(std::move(nh)).first;
464     }
465 
466     template <typename... Args>
emplace(Args &&...args)467     std::pair<iterator, bool> emplace( Args&&... args ) {
468         // Create a node with temporary order_key 0, which will be reinitialize
469         // in internal_insert after the hash calculation
470         value_node_ptr insert_node = create_node(0, std::forward<Args>(args)...);
471 
472         auto init_node = [&insert_node]( sokey_type order_key )->value_node_ptr {
473             insert_node->init(order_key);
474             return insert_node;
475         };
476 
477         auto insert_result = internal_insert(insert_node->value(), init_node);
478 
479         if (!insert_result.inserted) {
480             // If the insertion failed - destroy the node which was created
481             insert_node->init(split_order_key_regular(1));
482             destroy_node(insert_node);
483         }
484 
485         return { iterator(insert_result.node_with_equal_key), insert_result.inserted };
486     }
487 
488     template <typename... Args>
emplace_hint(const_iterator,Args &&...args)489     iterator emplace_hint( const_iterator, Args&&... args ) {
490         // Ignore hint
491         return emplace(std::forward<Args>(args)...).first;
492     }
493 
unsafe_erase(const_iterator pos)494     iterator unsafe_erase( const_iterator pos ) {
495         return iterator(first_value_node(internal_erase(pos.get_node_ptr())));
496     }
497 
unsafe_erase(iterator pos)498     iterator unsafe_erase( iterator pos ) {
499         return iterator(first_value_node(internal_erase(pos.get_node_ptr())));
500     }
501 
unsafe_erase(const_iterator first,const_iterator last)502     iterator unsafe_erase( const_iterator first, const_iterator last ) {
503         while(first != last) {
504             first = unsafe_erase(first);
505         }
506         return iterator(first.get_node_ptr());
507     }
508 
unsafe_erase(const key_type & key)509     size_type unsafe_erase( const key_type& key ) {
510         return internal_erase_by_key(key);
511     }
512 
513     template <typename K>
514     typename std::enable_if<is_transparent<K>::value
515                             && !std::is_convertible<K, const_iterator>::value
516                             && !std::is_convertible<K, iterator>::value,
unsafe_erase(const K & key)517                             size_type>::type unsafe_erase( const K& key )
518     {
519         return internal_erase_by_key(key);
520     }
521 
unsafe_extract(const_iterator pos)522     node_type unsafe_extract( const_iterator pos ) {
523         internal_extract(pos.get_node_ptr());
524         return node_handle_accessor::construct<node_type>(pos.get_node_ptr());
525     }
526 
unsafe_extract(iterator pos)527     node_type unsafe_extract( iterator pos ) {
528         internal_extract(pos.get_node_ptr());
529         return node_handle_accessor::construct<node_type>(pos.get_node_ptr());
530     }
531 
unsafe_extract(const key_type & key)532     node_type unsafe_extract( const key_type& key ) {
533         iterator item = find(key);
534         return item == end() ? node_type() : unsafe_extract(item);
535     }
536 
537     template <typename K>
538     typename std::enable_if<is_transparent<K>::value
539                             && !std::is_convertible<K, const_iterator>::value
540                             && !std::is_convertible<K, iterator>::value,
unsafe_extract(const K & key)541                             node_type>::type unsafe_extract( const K& key )
542     {
543         iterator item = find(key);
544         return item == end() ? node_type() : unsafe_extract(item);
545     }
546 
547     // Lookup functions
find(const key_type & key)548     iterator find( const key_type& key ) {
549         value_node_ptr result = internal_find(key);
550         return result == nullptr ? end() : iterator(result);
551     }
552 
find(const key_type & key)553     const_iterator find( const key_type& key ) const {
554         value_node_ptr result = const_cast<self_type*>(this)->internal_find(key);
555         return result == nullptr ? end() : const_iterator(result);
556     }
557 
558     template <typename K>
find(const K & key)559     typename std::enable_if<is_transparent<K>::value, iterator>::type find( const K& key ) {
560         value_node_ptr result = internal_find(key);
561         return result == nullptr ? end() : iterator(result);
562     }
563 
564     template <typename K>
find(const K & key)565     typename std::enable_if<is_transparent<K>::value, const_iterator>::type find( const K& key ) const {
566         value_node_ptr result = const_cast<self_type*>(this)->internal_find(key);
567         return result == nullptr ? end() : const_iterator(result);
568     }
569 
equal_range(const key_type & key)570     std::pair<iterator, iterator> equal_range( const key_type& key ) {
571         auto result = internal_equal_range(key);
572         return std::make_pair(iterator(result.first), iterator(result.second));
573     }
574 
equal_range(const key_type & key)575     std::pair<const_iterator, const_iterator> equal_range( const key_type& key ) const {
576         auto result = const_cast<self_type*>(this)->internal_equal_range(key);
577         return std::make_pair(const_iterator(result.first), const_iterator(result.second));
578     }
579 
580     template <typename K>
equal_range(const K & key)581     typename std::enable_if<is_transparent<K>::value, std::pair<iterator, iterator>>::type equal_range( const K& key ) {
582         auto result = internal_equal_range(key);
583         return std::make_pair(iterator(result.first), iterator(result.second));
584     }
585 
586     template <typename K>
equal_range(const K & key)587     typename std::enable_if<is_transparent<K>::value, std::pair<const_iterator, const_iterator>>::type equal_range( const K& key ) const {
588         auto result = const_cast<self_type*>(this)->internal_equal_range(key);
589         return std::make_pair(iterator(result.first), iterator(result.second));
590     }
591 
count(const key_type & key)592     size_type count( const key_type& key ) const {
593         return internal_count(key);
594     }
595 
596     template <typename K>
count(const K & key)597     typename std::enable_if<is_transparent<K>::value, size_type>::type count( const K& key ) const {
598         return internal_count(key);
599     }
600 
contains(const key_type & key)601     bool contains( const key_type& key ) const {
602         return find(key) != end();
603     }
604 
605     template <typename K>
contains(const K & key)606     typename std::enable_if<is_transparent<K>::value, bool>::type contains( const K& key ) const {
607         return find(key) != end();
608     }
609 
610     // Bucket interface
unsafe_begin(size_type n)611     local_iterator unsafe_begin( size_type n ) {
612         return local_iterator(first_value_node(get_bucket(n)));
613     }
614 
unsafe_begin(size_type n)615     const_local_iterator unsafe_begin( size_type n ) const {
616         auto bucket_begin = first_value_node(const_cast<self_type*>(this)->get_bucket(n));
617         return const_local_iterator(bucket_begin);
618     }
619 
unsafe_cbegin(size_type n)620     const_local_iterator unsafe_cbegin( size_type n ) const {
621         auto bucket_begin = first_value_node(const_cast<self_type*>(this)->get_bucket(n));
622         return const_local_iterator(bucket_begin);
623     }
624 
unsafe_end(size_type n)625     local_iterator unsafe_end( size_type n ) {
626         size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed);
627         return n != bucket_count - 1 ? unsafe_begin(get_next_bucket_index(n)) : local_iterator(nullptr);
628     }
629 
unsafe_end(size_type n)630     const_local_iterator unsafe_end( size_type n ) const {
631         size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed);
632         return n != bucket_count - 1 ? unsafe_begin(get_next_bucket_index(n)) : const_local_iterator(nullptr);
633     }
634 
unsafe_cend(size_type n)635     const_local_iterator unsafe_cend( size_type n ) const {
636         size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed);
637         return n != bucket_count - 1 ? unsafe_begin(get_next_bucket_index(n)) : const_local_iterator(nullptr);
638     }
639 
unsafe_bucket_count()640     size_type unsafe_bucket_count() const { return my_bucket_count.load(std::memory_order_relaxed); }
641 
unsafe_max_bucket_count()642     size_type unsafe_max_bucket_count() const {
643         return max_size();
644     }
645 
unsafe_bucket_size(size_type n)646     size_type unsafe_bucket_size( size_type n ) const {
647         return size_type(std::distance(unsafe_begin(n), unsafe_end(n)));
648     }
649 
unsafe_bucket(const key_type & key)650     size_type unsafe_bucket( const key_type& key ) const {
651         return my_hash_compare(key) % my_bucket_count.load(std::memory_order_relaxed);
652     }
653 
654     // Hash policy
load_factor()655     float load_factor() const {
656         return float(size() / float(my_bucket_count.load(std::memory_order_acquire)));
657     }
658 
max_load_factor()659     float max_load_factor() const { return my_max_load_factor; }
660 
max_load_factor(float mlf)661     void max_load_factor( float mlf ) {
662         if (mlf != mlf || mlf < 0) {
663             tbb::detail::throw_exception(exception_id::invalid_load_factor);
664         }
665         my_max_load_factor = mlf;
666     } // TODO: unsafe?
667 
rehash(size_type bucket_count)668     void rehash( size_type bucket_count ) {
669         size_type current_bucket_count = my_bucket_count.load(std::memory_order_acquire);
670         if (current_bucket_count < bucket_count) {
671             // TODO: do we need do-while here?
672             my_bucket_count.compare_exchange_strong(current_bucket_count, round_up_to_power_of_two(bucket_count));
673         }
674     }
675 
reserve(size_type elements_count)676     void reserve( size_type elements_count ) {
677         size_type current_bucket_count = my_bucket_count.load(std::memory_order_acquire);
678         size_type necessary_bucket_count = current_bucket_count;
679 
680         // max_load_factor() is currently unsafe, so we can assume that my_max_load_factor
681         // would not be changed during the calculation
682         // TODO: Log2 seems useful here
683         while (necessary_bucket_count * max_load_factor() < elements_count) {
684                 necessary_bucket_count <<= 1;
685         }
686 
687         while (!my_bucket_count.compare_exchange_strong(current_bucket_count, necessary_bucket_count)) {
688             if (current_bucket_count >= necessary_bucket_count)
689                 break;
690         }
691     }
692 
693     // Observers
hash_function()694     hasher hash_function() const { return my_hash_compare.hash_function(); }
key_eq()695     key_equal key_eq() const { return my_hash_compare.key_eq(); }
696 
697     class const_range_type {
698     private:
699         const concurrent_unordered_base& my_instance;
700         node_ptr my_begin_node; // may be node* const
701         node_ptr my_end_node;
702         mutable node_ptr my_midpoint_node;
703     public:
704         using size_type = typename concurrent_unordered_base::size_type;
705         using value_type = typename concurrent_unordered_base::value_type;
706         using reference = typename concurrent_unordered_base::reference;
707         using difference_type = typename concurrent_unordered_base::difference_type;
708         using iterator = typename concurrent_unordered_base::const_iterator;
709 
empty()710         bool empty() const { return my_begin_node == my_end_node; }
711 
is_divisible()712         bool is_divisible() const {
713             return my_midpoint_node != my_end_node;
714         }
715 
grainsize()716         size_type grainsize() const { return 1; }
717 
const_range_type(const_range_type & range,split)718         const_range_type( const_range_type& range, split )
719             : my_instance(range.my_instance),
720               my_begin_node(range.my_midpoint_node),
721               my_end_node(range.my_end_node)
722         {
723             range.my_end_node = my_begin_node;
724             __TBB_ASSERT(!empty(), "Splitting despite the range is not divisible");
725             __TBB_ASSERT(!range.empty(), "Splitting despite the range is not divisible");
726             set_midpoint();
727             range.set_midpoint();
728         }
729 
begin()730         iterator begin() const { return iterator(my_instance.first_value_node(my_begin_node)); }
end()731         iterator end() const { return iterator(my_instance.first_value_node(my_end_node)); }
732 
const_range_type(const concurrent_unordered_base & table)733         const_range_type( const concurrent_unordered_base& table )
734             : my_instance(table), my_begin_node(my_instance.first_value_node(const_cast<node_ptr>(&table.my_head))), my_end_node(nullptr)
735         {
736             set_midpoint();
737         }
738     private:
set_midpoint()739         void set_midpoint() const {
740             if (empty()) {
741                 my_midpoint_node = my_end_node;
742             } else {
743                 sokey_type invalid_key = ~sokey_type(0);
744                 sokey_type begin_key = my_begin_node != nullptr ? my_begin_node->order_key() : invalid_key;
745                 sokey_type end_key = my_end_node != nullptr ? my_end_node->order_key() : invalid_key;
746 
747                 size_type mid_bucket = reverse_bits(begin_key + (end_key - begin_key) / 2) %
748                     my_instance.my_bucket_count.load(std::memory_order_relaxed);
749                 while( my_instance.my_segments[mid_bucket].load(std::memory_order_relaxed) == nullptr) {
750                     mid_bucket = my_instance.get_parent(mid_bucket);
751                 }
752                 if (reverse_bits(mid_bucket) > begin_key) {
753                     // Found a dummy node between begin and end
754                     my_midpoint_node = my_instance.first_value_node(
755                         my_instance.my_segments[mid_bucket].load(std::memory_order_relaxed));
756                 } else {
757                     // Didn't find a dummy node between begin and end
758                     my_midpoint_node = my_end_node;
759                 }
760             }
761         }
762     }; // class const_range_type
763 
764     class range_type : public const_range_type {
765     public:
766         using iterator = typename concurrent_unordered_base::iterator;
767         using const_range_type::const_range_type;
768 
begin()769         iterator begin() const { return iterator(const_range_type::begin().get_node_ptr()); }
end()770         iterator end() const { return iterator(const_range_type::end().get_node_ptr()); }
771     }; // class range_type
772 
773     // Parallel iteration
range()774     range_type range() {
775         return range_type(*this);
776     }
777 
range()778     const_range_type range() const {
779         return const_range_type(*this);
780     }
781 protected:
782     static constexpr bool allow_multimapping = traits_type::allow_multimapping;
783 
784 private:
785     static constexpr size_type initial_bucket_count = 8;
786     static constexpr float initial_max_load_factor = 4; // TODO: consider 1?
787     static constexpr size_type pointers_per_embedded_table = sizeof(size_type) * 8 - 1;
788 
789     class unordered_segment_table
790         : public segment_table<std::atomic<node_ptr>, allocator_type, unordered_segment_table, pointers_per_embedded_table>
791     {
792         using self_type = unordered_segment_table;
793         using atomic_node_ptr = std::atomic<node_ptr>;
794         using base_type = segment_table<std::atomic<node_ptr>, allocator_type, unordered_segment_table, pointers_per_embedded_table>;
795         using segment_type = typename base_type::segment_type;
796         using base_allocator_type = typename base_type::allocator_type;
797 
798         using segment_allocator_type = typename allocator_traits_type::template rebind_alloc<atomic_node_ptr>;
799         using segment_allocator_traits = tbb::detail::allocator_traits<segment_allocator_type>;
800     public:
801         // Segment table for unordered containers should not be extended in the wait- free implementation
802         static constexpr bool allow_table_extending = false;
803         static constexpr bool is_noexcept_assignment = std::is_nothrow_move_assignable<hasher>::value &&
804                                                        std::is_nothrow_move_assignable<key_equal>::value &&
805                                                        segment_allocator_traits::is_always_equal::value;
806         static constexpr bool is_noexcept_swap = tbb::detail::is_nothrow_swappable<hasher>::value &&
807                                                  tbb::detail::is_nothrow_swappable<key_equal>::value &&
808                                                  segment_allocator_traits::is_always_equal::value;
809 
810         // TODO: using base_type::base_type is not compiling on Windows and Intel Compiler - investigate
811         unordered_segment_table( const base_allocator_type& alloc = base_allocator_type() )
base_type(alloc)812             : base_type(alloc) {}
813 
814         unordered_segment_table( const unordered_segment_table& ) = default;
815 
unordered_segment_table(const unordered_segment_table & other,const base_allocator_type & alloc)816         unordered_segment_table( const unordered_segment_table& other, const base_allocator_type& alloc )
817             : base_type(other, alloc) {}
818 
819         unordered_segment_table( unordered_segment_table&& ) = default;
820 
unordered_segment_table(unordered_segment_table && other,const base_allocator_type & alloc)821         unordered_segment_table( unordered_segment_table&& other, const base_allocator_type& alloc )
822             : base_type(std::move(other), alloc) {}
823 
824         unordered_segment_table& operator=( const unordered_segment_table& ) = default;
825 
826         unordered_segment_table& operator=( unordered_segment_table&& ) = default;
827 
create_segment(typename base_type::segment_table_type,typename base_type::segment_index_type segment_index,size_type)828         segment_type create_segment( typename base_type::segment_table_type, typename base_type::segment_index_type segment_index, size_type ) {
829             segment_allocator_type alloc(this->get_allocator());
830             size_type seg_size = this->segment_size(segment_index);
831             segment_type new_segment = segment_allocator_traits::allocate(alloc, seg_size);
832             for (size_type i = 0; i != seg_size; ++i) {
833                 segment_allocator_traits::construct(alloc, new_segment + i, nullptr);
834             }
835             return new_segment;
836         }
837 
nullify_segment(typename base_type::segment_table_type table,size_type segment_index)838         segment_type nullify_segment( typename base_type::segment_table_type table, size_type segment_index ) {
839             segment_type target_segment = table[segment_index].load(std::memory_order_relaxed);
840             table[segment_index].store(nullptr, std::memory_order_relaxed);
841             return target_segment;
842         }
843 
844         // deallocate_segment is required by the segment_table base class, but
845         // in unordered, it is also necessary to call the destructor during deallocation
deallocate_segment(segment_type address,size_type index)846         void deallocate_segment( segment_type address, size_type index ) {
847             destroy_segment(address, index);
848         }
849 
destroy_segment(segment_type address,size_type index)850         void destroy_segment( segment_type address, size_type index ) {
851             segment_allocator_type alloc(this->get_allocator());
852             for (size_type i = 0; i != this->segment_size(index); ++i) {
853                 segment_allocator_traits::destroy(alloc, address + i);
854             }
855             segment_allocator_traits::deallocate(alloc, address, this->segment_size(index));
856         }
857 
858 
copy_segment(size_type index,segment_type,segment_type to)859         void copy_segment( size_type index, segment_type, segment_type to ) {
860             if (index == 0) {
861                 // The first element in the first segment is embedded into the table (my_head)
862                 // so the first pointer should not be stored here
863                 // It would be stored during move ctor/assignment operation
864                 to[1].store(nullptr, std::memory_order_relaxed);
865             } else {
866                 for (size_type i = 0; i != this->segment_size(index); ++i) {
867                     to[i].store(nullptr, std::memory_order_relaxed);
868                 }
869             }
870         }
871 
move_segment(size_type index,segment_type from,segment_type to)872         void move_segment( size_type index, segment_type from, segment_type to ) {
873             if (index == 0) {
874                 // The first element in the first segment is embedded into the table (my_head)
875                 // so the first pointer should not be stored here
876                 // It would be stored during move ctor/assignment operation
877                 to[1].store(from[1].load(std::memory_order_relaxed), std::memory_order_relaxed);
878             } else {
879                 for (size_type i = 0; i != this->segment_size(index); ++i) {
880                     to[i].store(from[i].load(std::memory_order_relaxed), std::memory_order_relaxed);
881                     from[i].store(nullptr, std::memory_order_relaxed);
882                 }
883             }
884         }
885 
886         // allocate_long_table is required by the segment_table base class, but unused for unordered containers
allocate_long_table(const typename base_type::atomic_segment *,size_type)887         typename base_type::segment_table_type allocate_long_table( const typename base_type::atomic_segment*, size_type ) {
888             __TBB_ASSERT(false, "This method should never been called");
889             // TableType is a pointer
890             return nullptr;
891         }
892 
893         // destroy_elements is required by the segment_table base class, but unused for unordered containers
894         // this function call but do nothing
destroy_elements()895         void destroy_elements() {}
896     }; // struct unordered_segment_table
897 
internal_clear()898     void internal_clear() {
899         // TODO: consider usefulness of two versions of clear() - with dummy nodes deallocation and without it
900         node_ptr next = my_head.next();
901         node_ptr curr = next;
902 
903         my_head.set_next(nullptr);
904 
905         while (curr != nullptr) {
906             next = curr->next();
907             destroy_node(curr);
908             curr = next;
909         }
910 
911         my_size.store(0, std::memory_order_relaxed);
912         my_segments.clear();
913     }
914 
destroy_node(node_ptr node)915     void destroy_node( node_ptr node ) {
916         if (node->is_dummy()) {
917             node_allocator_type dummy_node_allocator(my_segments.get_allocator());
918             // Destroy the node
919             node_allocator_traits::destroy(dummy_node_allocator, node);
920             // Deallocate the memory
921             node_allocator_traits::deallocate(dummy_node_allocator, node, 1);
922         } else {
923             // GCC 11.1 issues a warning here that incorrect destructor might be called for dummy_nodes
924             #if (__TBB_GCC_VERSION >= 110100 && __TBB_GCC_VERSION < 140000 ) && !__clang__ && !__INTEL_COMPILER
925             volatile
926             #endif
927             value_node_ptr val_node = static_cast<value_node_ptr>(node);
928             value_node_allocator_type value_node_allocator(my_segments.get_allocator());
929             // Destroy the value
930             value_node_allocator_traits::destroy(value_node_allocator, val_node->storage());
931             // Destroy the node
932             value_node_allocator_traits::destroy(value_node_allocator, val_node);
933             // Deallocate the memory
934             value_node_allocator_traits::deallocate(value_node_allocator, val_node, 1);
935         }
936     }
937 
938     struct internal_insert_return_type {
939         // If the insertion failed - the remaining_node points to the node, which was failed to insert
940         // This node can be allocated in process of insertion
941         value_node_ptr remaining_node;
942         // If the insertion failed - node_with_equal_key points to the node in the list with the
943         // key, equivalent to the inserted, otherwise it points to the node, which was inserted.
944         value_node_ptr node_with_equal_key;
945         // Insertion status
946         // NOTE: if it is true - remaining_node should be nullptr
947         bool inserted;
948     }; // struct internal_insert_return_type
949 
950     // Inserts the value into the split ordered list
951     template <typename ValueType>
internal_insert_value(ValueType && value)952     std::pair<iterator, bool> internal_insert_value( ValueType&& value ) {
953 
954         auto create_value_node = [&value, this]( sokey_type order_key )->value_node_ptr {
955             return create_node(order_key, std::forward<ValueType>(value));
956         };
957 
958         auto insert_result = internal_insert(value, create_value_node);
959 
960         if (insert_result.remaining_node != nullptr) {
961             // If the insertion fails - destroy the node which was failed to insert if it exist
962             __TBB_ASSERT(!insert_result.inserted,
963                          "remaining_node should be nullptr if the node was successfully inserted");
964             destroy_node(insert_result.remaining_node);
965         }
966 
967         return { iterator(insert_result.node_with_equal_key), insert_result.inserted };
968     }
969 
970     // Inserts the node into the split ordered list
971     // Creates a node using the specified callback after the place for insertion was found
972     // Returns internal_insert_return_type object, where:
973     //     - If the insertion succeeded:
974     //         - remaining_node is nullptr
975     //         - node_with_equal_key point to the inserted node
976     //         - inserted is true
977     //     - If the insertion failed:
978     //         - remaining_node points to the node, that was failed to insert if it was created.
979     //           nullptr if the node was not created, because the requested key was already
980     //           presented in the list
981     //         - node_with_equal_key point to the element in the list with the key, equivalent to
982     //           to the requested key
983     //         - inserted is false
984     template <typename ValueType, typename CreateInsertNode>
internal_insert(ValueType && value,CreateInsertNode create_insert_node)985     internal_insert_return_type internal_insert( ValueType&& value, CreateInsertNode create_insert_node ) {
986         static_assert(std::is_same<typename std::decay<ValueType>::type, value_type>::value,
987                       "Incorrect type in internal_insert");
988         const key_type& key = traits_type::get_key(value);
989         sokey_type hash_key = sokey_type(my_hash_compare(key));
990 
991         sokey_type order_key = split_order_key_regular(hash_key);
992         node_ptr prev = prepare_bucket(hash_key);
993         __TBB_ASSERT(prev != nullptr, "Invalid head node");
994 
995         auto search_result = search_after(prev, order_key, key);
996 
997         if (search_result.second) {
998             return internal_insert_return_type{ nullptr, search_result.first, false };
999         }
1000 
1001         value_node_ptr new_node = create_insert_node(order_key);
1002         node_ptr curr = search_result.first;
1003 
1004         while (!try_insert(prev, new_node, curr)) {
1005             search_result = search_after(prev, order_key, key);
1006             if (search_result.second) {
1007                 return internal_insert_return_type{ new_node, search_result.first, false };
1008             }
1009             curr = search_result.first;
1010         }
1011 
1012         auto sz = my_size.fetch_add(1);
1013         adjust_table_size(sz + 1, my_bucket_count.load(std::memory_order_acquire));
1014         return internal_insert_return_type{ nullptr, static_cast<value_node_ptr>(new_node), true };
1015     }
1016 
1017     // Searches the node with the key, equivalent to key with requested order key after the node prev
1018     // Returns the existing node and true if the node is already in the list
1019     // Returns the first node with the order key, greater than requested and false if the node is not presented in the list
search_after(node_ptr & prev,sokey_type order_key,const key_type & key)1020     std::pair<value_node_ptr, bool> search_after( node_ptr& prev, sokey_type order_key, const key_type& key ) {
1021         // NOTE: static_cast<value_node_ptr>(curr) should be done only after we would ensure
1022         // that the node is not a dummy node
1023 
1024         node_ptr curr = prev->next();
1025 
1026         while (curr != nullptr && (curr->order_key() < order_key ||
1027                (curr->order_key() == order_key && !my_hash_compare(traits_type::get_key(static_cast<value_node_ptr>(curr)->value()), key))))
1028         {
1029             prev = curr;
1030             curr = curr->next();
1031         }
1032 
1033         if (curr != nullptr && curr->order_key() == order_key && !allow_multimapping) {
1034             return { static_cast<value_node_ptr>(curr), true };
1035         }
1036         return { static_cast<value_node_ptr>(curr), false };
1037     }
1038 
adjust_table_size(size_type total_elements,size_type current_size)1039     void adjust_table_size( size_type total_elements, size_type current_size ) {
1040         // Grow the table by a factor of 2 if possible and needed
1041         if ( (float(total_elements) / float(current_size)) > my_max_load_factor ) {
1042             // Double the size of the hash only if size hash not changed in between loads
1043             my_bucket_count.compare_exchange_strong(current_size, 2u * current_size);
1044         }
1045     }
1046 
insert_dummy_node(node_ptr parent_dummy_node,sokey_type order_key)1047     node_ptr insert_dummy_node( node_ptr parent_dummy_node, sokey_type order_key ) {
1048         node_ptr prev_node = parent_dummy_node;
1049 
1050         node_ptr dummy_node = create_dummy_node(order_key);
1051         node_ptr next_node;
1052 
1053         do {
1054             next_node = prev_node->next();
1055             // Move forward through the list while the order key is less than requested
1056             while (next_node != nullptr && next_node->order_key() < order_key) {
1057                 prev_node = next_node;
1058                 next_node = next_node->next();
1059             }
1060 
1061             if (next_node != nullptr && next_node->order_key() == order_key) {
1062                 // Another dummy node with the same order key was inserted by another thread
1063                 // Destroy the node and exit
1064                 destroy_node(dummy_node);
1065                 return next_node;
1066             }
1067         } while (!try_insert(prev_node, dummy_node, next_node));
1068 
1069         return dummy_node;
1070     }
1071 
1072     // Try to insert a node between prev_node and expected next
1073     // If the next is not equal to expected next - return false
try_insert(node_ptr prev_node,node_ptr new_node,node_ptr current_next_node)1074     static bool try_insert( node_ptr prev_node, node_ptr new_node, node_ptr current_next_node ) {
1075         new_node->set_next(current_next_node);
1076         return prev_node->try_set_next(current_next_node, new_node);
1077     }
1078 
1079     // Returns the bucket, associated with the hash_key
prepare_bucket(sokey_type hash_key)1080     node_ptr prepare_bucket( sokey_type hash_key ) {
1081         size_type bucket = hash_key % my_bucket_count.load(std::memory_order_acquire);
1082         return get_bucket(bucket);
1083     }
1084 
1085     // Initialize the corresponding bucket if it is not initialized
get_bucket(size_type bucket_index)1086     node_ptr get_bucket( size_type bucket_index ) {
1087         if (my_segments[bucket_index].load(std::memory_order_acquire) == nullptr) {
1088             init_bucket(bucket_index);
1089         }
1090         return my_segments[bucket_index].load(std::memory_order_acquire);
1091     }
1092 
init_bucket(size_type bucket)1093     void init_bucket( size_type bucket ) {
1094         if (bucket == 0) {
1095             // Atomicaly store the first bucket into my_head
1096             node_ptr disabled = nullptr;
1097             my_segments[0].compare_exchange_strong(disabled, &my_head);
1098             return;
1099         }
1100 
1101         size_type parent_bucket = get_parent(bucket);
1102 
1103         while (my_segments[parent_bucket].load(std::memory_order_acquire) == nullptr) {
1104             // Initialize all of the parent buckets
1105             init_bucket(parent_bucket);
1106         }
1107 
1108         __TBB_ASSERT(my_segments[parent_bucket].load(std::memory_order_acquire) != nullptr, "Parent bucket should be initialized");
1109         node_ptr parent = my_segments[parent_bucket].load(std::memory_order_acquire);
1110 
1111         // Insert dummy node into the list
1112         node_ptr dummy_node = insert_dummy_node(parent, split_order_key_dummy(bucket));
1113         // TODO: consider returning pair<node_ptr, bool> to avoid store operation if the bucket was stored by an other thread
1114         // or move store to insert_dummy_node
1115         // Add dummy_node into the segment table
1116         my_segments[bucket].store(dummy_node, std::memory_order_release);
1117     }
1118 
create_dummy_node(sokey_type order_key)1119     node_ptr create_dummy_node( sokey_type order_key ) {
1120         node_allocator_type dummy_node_allocator(my_segments.get_allocator());
1121         node_ptr dummy_node = node_allocator_traits::allocate(dummy_node_allocator, 1);
1122         node_allocator_traits::construct(dummy_node_allocator, dummy_node, order_key);
1123         return dummy_node;
1124     }
1125 
1126     template <typename... Args>
create_node(sokey_type order_key,Args &&...args)1127     value_node_ptr create_node( sokey_type order_key, Args&&... args ) {
1128         value_node_allocator_type value_node_allocator(my_segments.get_allocator());
1129         // Allocate memory for the value_node
1130         value_node_ptr new_node = value_node_allocator_traits::allocate(value_node_allocator, 1);
1131         // Construct the node
1132         value_node_allocator_traits::construct(value_node_allocator, new_node, order_key);
1133 
1134         // try_call API is not convenient here due to broken
1135         // variadic capture on GCC 4.8.5
1136         auto value_guard = make_raii_guard([&] {
1137             value_node_allocator_traits::destroy(value_node_allocator, new_node);
1138             value_node_allocator_traits::deallocate(value_node_allocator, new_node, 1);
1139         });
1140 
1141         // Construct the value in the node
1142         value_node_allocator_traits::construct(value_node_allocator, new_node->storage(), std::forward<Args>(args)...);
1143         value_guard.dismiss();
1144         return new_node;
1145     }
1146 
first_value_node(node_ptr first_node)1147     value_node_ptr first_value_node( node_ptr first_node ) const {
1148         while (first_node != nullptr && first_node->is_dummy()) {
1149             first_node = first_node->next();
1150         }
1151         return static_cast<value_node_ptr>(first_node);
1152     }
1153 
1154     // Unsafe method, which removes the node from the list and returns the next node
internal_erase(value_node_ptr node_to_erase)1155     node_ptr internal_erase( value_node_ptr node_to_erase ) {
1156         __TBB_ASSERT(node_to_erase != nullptr, "Invalid iterator for erase");
1157         node_ptr next_node = node_to_erase->next();
1158         internal_extract(node_to_erase);
1159         destroy_node(node_to_erase);
1160         return next_node;
1161     }
1162 
1163     template <typename K>
internal_erase_by_key(const K & key)1164     size_type internal_erase_by_key( const K& key ) {
1165         // TODO: consider reimplementation without equal_range - it is not effective to perform lookup over a bucket
1166         // for each unsafe_erase call
1167         auto eq_range = equal_range(key);
1168         size_type erased_count = 0;
1169 
1170         for (auto it = eq_range.first; it != eq_range.second;) {
1171             it = unsafe_erase(it);
1172             ++erased_count;
1173         }
1174         return erased_count;
1175     }
1176 
1177     // Unsafe method, which extracts the node from the list
internal_extract(value_node_ptr node_to_extract)1178     void internal_extract( value_node_ptr node_to_extract ) {
1179         const key_type& key = traits_type::get_key(node_to_extract->value());
1180         sokey_type hash_key = sokey_type(my_hash_compare(key));
1181 
1182         node_ptr prev_node = prepare_bucket(hash_key);
1183 
1184         for (node_ptr node = prev_node->next(); node != nullptr; prev_node = node, node = node->next()) {
1185             if (node == node_to_extract) {
1186                 unlink_node(prev_node, node, node_to_extract->next());
1187                 my_size.store(my_size.load(std::memory_order_relaxed) - 1, std::memory_order_relaxed);
1188                 return;
1189             }
1190             __TBB_ASSERT(node->order_key() <= node_to_extract->order_key(),
1191                          "node, which is going to be extracted should be presented in the list");
1192         }
1193     }
1194 
1195 protected:
1196     template <typename SourceType>
internal_merge(SourceType && source)1197     void internal_merge( SourceType&& source ) {
1198         static_assert(std::is_same<node_type, typename std::decay<SourceType>::type::node_type>::value,
1199                       "Incompatible containers cannot be merged");
1200 
1201         for (node_ptr source_prev = &source.my_head; source_prev->next() != nullptr;) {
1202             if (!source_prev->next()->is_dummy()) {
1203                 value_node_ptr curr = static_cast<value_node_ptr>(source_prev->next());
1204                 // If the multimapping is allowed, or the key is not presented
1205                 // in the *this container - extract the node from the list
1206                 if (allow_multimapping || !contains(traits_type::get_key(curr->value()))) {
1207                     node_ptr next_node = curr->next();
1208                     source.unlink_node(source_prev, curr, next_node);
1209 
1210                     // Remember the old order key
1211                     sokey_type old_order_key = curr->order_key();
1212 
1213                     // Node handle with curr cannot be used directly in insert call, because
1214                     // the destructor of node_type will destroy curr
1215                     node_type curr_node = node_handle_accessor::construct<node_type>(curr);
1216 
1217                     // If the insertion fails - return ownership of the node to the source
1218                     if (!insert(std::move(curr_node)).second) {
1219                         __TBB_ASSERT(!allow_multimapping, "Insertion should succeed for multicontainer");
1220                         __TBB_ASSERT(source_prev->next() == next_node,
1221                                      "Concurrent operations with the source container in merge are prohibited");
1222 
1223                         // Initialize the node with the old order key, because the order key
1224                         // can change during the insertion
1225                         curr->init(old_order_key);
1226                         __TBB_ASSERT(old_order_key >= source_prev->order_key() &&
1227                                      (next_node == nullptr || old_order_key <= next_node->order_key()),
1228                                      "Wrong nodes order in the source container");
1229                         // Merge is unsafe for source container, so the insertion back can be done without compare_exchange
1230                         curr->set_next(next_node);
1231                         source_prev->set_next(curr);
1232                         source_prev = curr;
1233                         node_handle_accessor::deactivate(curr_node);
1234                     } else {
1235                         source.my_size.fetch_sub(1, std::memory_order_relaxed);
1236                     }
1237                 } else {
1238                     source_prev = curr;
1239                 }
1240             } else {
1241                 source_prev = source_prev->next();
1242             }
1243         }
1244     }
1245 
1246 private:
1247     // Unsafe method, which unlinks the node between prev and next
unlink_node(node_ptr prev_node,node_ptr node_to_unlink,node_ptr next_node)1248     void unlink_node( node_ptr prev_node, node_ptr node_to_unlink, node_ptr next_node ) {
1249         __TBB_ASSERT(prev_node->next() == node_to_unlink &&
1250                      node_to_unlink->next() == next_node,
1251                      "erasing and extracting nodes from the containers are unsafe in concurrent mode");
1252         prev_node->set_next(next_node);
1253         node_to_unlink->set_next(nullptr);
1254     }
1255 
1256     template <typename K>
internal_find(const K & key)1257     value_node_ptr internal_find( const K& key ) {
1258         sokey_type hash_key = sokey_type(my_hash_compare(key));
1259         sokey_type order_key = split_order_key_regular(hash_key);
1260 
1261         node_ptr curr = prepare_bucket(hash_key);
1262 
1263         while (curr != nullptr) {
1264             if (curr->order_key() > order_key) {
1265                 // If the order key is greater than the requested order key,
1266                 // the element is not in the hash table
1267                 return nullptr;
1268             } else if (curr->order_key() == order_key &&
1269                        my_hash_compare(traits_type::get_key(static_cast<value_node_ptr>(curr)->value()), key)) {
1270                 // The fact that order keys match does not mean that the element is found.
1271                 // Key function comparison has to be performed to check whether this is the
1272                 // right element. If not, keep searching while order key is the same.
1273                 return static_cast<value_node_ptr>(curr);
1274             }
1275             curr = curr->next();
1276         }
1277 
1278         return nullptr;
1279     }
1280 
1281     template <typename K>
internal_equal_range(const K & key)1282     std::pair<value_node_ptr, value_node_ptr> internal_equal_range( const K& key ) {
1283         sokey_type hash_key = sokey_type(my_hash_compare(key));
1284         sokey_type order_key = split_order_key_regular(hash_key);
1285 
1286         node_ptr curr = prepare_bucket(hash_key);
1287 
1288         while (curr != nullptr) {
1289             if (curr->order_key() > order_key) {
1290                 // If the order key is greater than the requested order key,
1291                 // the element is not in the hash table
1292                 return std::make_pair(nullptr, nullptr);
1293             } else if (curr->order_key() == order_key &&
1294                        my_hash_compare(traits_type::get_key(static_cast<value_node_ptr>(curr)->value()), key)) {
1295                 value_node_ptr first = static_cast<value_node_ptr>(curr);
1296                 node_ptr last = first;
1297                 do {
1298                     last = last->next();
1299                 } while (allow_multimapping && last != nullptr && !last->is_dummy() &&
1300                         my_hash_compare(traits_type::get_key(static_cast<value_node_ptr>(last)->value()), key));
1301                 return std::make_pair(first, first_value_node(last));
1302             }
1303             curr = curr->next();
1304         }
1305         return {nullptr, nullptr};
1306     }
1307 
1308     template <typename K>
internal_count(const K & key)1309     size_type internal_count( const K& key ) const {
1310         if (allow_multimapping) {
1311             // TODO: consider reimplementing the internal_equal_range with elements counting to avoid std::distance
1312             auto eq_range = equal_range(key);
1313             return std::distance(eq_range.first, eq_range.second);
1314         } else {
1315             return contains(key) ? 1 : 0;
1316         }
1317     }
1318 
internal_copy(const concurrent_unordered_base & other)1319     void internal_copy( const concurrent_unordered_base& other ) {
1320         node_ptr last_node = &my_head;
1321         my_segments[0].store(&my_head, std::memory_order_relaxed);
1322 
1323         for (node_ptr node = other.my_head.next(); node != nullptr; node = node->next()) {
1324             node_ptr new_node;
1325             if (!node->is_dummy()) {
1326                 // The node in the right table contains a value
1327                 new_node = create_node(node->order_key(), static_cast<value_node_ptr>(node)->value());
1328             } else {
1329                 // The node in the right table is a dummy node
1330                 new_node = create_dummy_node(node->order_key());
1331                 my_segments[reverse_bits(node->order_key())].store(new_node, std::memory_order_relaxed);
1332             }
1333 
1334             last_node->set_next(new_node);
1335             last_node = new_node;
1336         }
1337     }
1338 
internal_move(concurrent_unordered_base && other)1339     void internal_move( concurrent_unordered_base&& other ) {
1340         node_ptr last_node = &my_head;
1341         my_segments[0].store(&my_head, std::memory_order_relaxed);
1342 
1343         for (node_ptr node = other.my_head.next(); node != nullptr; node = node->next()) {
1344             node_ptr new_node;
1345             if (!node->is_dummy()) {
1346                 // The node in the right table contains a value
1347                 new_node = create_node(node->order_key(), std::move(static_cast<value_node_ptr>(node)->value()));
1348             } else {
1349                 // TODO: do we need to destroy a dummy node in the right container?
1350                 // The node in the right table is a dummy_node
1351                 new_node = create_dummy_node(node->order_key());
1352                 my_segments[reverse_bits(node->order_key())].store(new_node, std::memory_order_relaxed);
1353             }
1354 
1355             last_node->set_next(new_node);
1356             last_node = new_node;
1357         }
1358     }
1359 
move_content(concurrent_unordered_base && other)1360     void move_content( concurrent_unordered_base&& other ) {
1361         // NOTE: allocators should be equal
1362         my_head.set_next(other.my_head.next());
1363         other.my_head.set_next(nullptr);
1364         my_segments[0].store(&my_head, std::memory_order_relaxed);
1365 
1366         other.my_bucket_count.store(initial_bucket_count, std::memory_order_relaxed);
1367         other.my_max_load_factor = initial_max_load_factor;
1368         other.my_size.store(0, std::memory_order_relaxed);
1369     }
1370 
internal_move_construct_with_allocator(concurrent_unordered_base && other,const allocator_type &,std::true_type)1371     void internal_move_construct_with_allocator( concurrent_unordered_base&& other, const allocator_type&,
1372                                                  /*is_always_equal = */std::true_type ) {
1373         // Allocators are always equal - no need to compare for equality
1374         move_content(std::move(other));
1375     }
1376 
internal_move_construct_with_allocator(concurrent_unordered_base && other,const allocator_type & alloc,std::false_type)1377     void internal_move_construct_with_allocator( concurrent_unordered_base&& other, const allocator_type& alloc,
1378                                                  /*is_always_equal = */std::false_type ) {
1379         // Allocators are not always equal
1380         if (alloc == other.my_segments.get_allocator()) {
1381             move_content(std::move(other));
1382         } else {
1383             try_call( [&] {
1384                 internal_move(std::move(other));
1385             } ).on_exception( [&] {
1386                 clear();
1387             });
1388         }
1389     }
1390 
1391     // Move assigns the hash table to other is any instances of allocator_type are always equal
1392     // or propagate_on_container_move_assignment is true
internal_move_assign(concurrent_unordered_base && other,std::true_type)1393     void internal_move_assign( concurrent_unordered_base&& other, /*is_always_equal || POCMA = */std::true_type ) {
1394         move_content(std::move(other));
1395     }
1396 
1397     // Move assigns the hash table to other is any instances of allocator_type are not always equal
1398     // and propagate_on_container_move_assignment is false
internal_move_assign(concurrent_unordered_base && other,std::false_type)1399     void internal_move_assign( concurrent_unordered_base&& other, /*is_always_equal || POCMA = */std::false_type ) {
1400         if (my_segments.get_allocator() == other.my_segments.get_allocator()) {
1401             move_content(std::move(other));
1402         } else {
1403             // TODO: guards for exceptions
1404             internal_move(std::move(other));
1405         }
1406     }
1407 
internal_swap(concurrent_unordered_base & other,std::true_type)1408     void internal_swap( concurrent_unordered_base& other, /*is_always_equal || POCS = */std::true_type ) {
1409         internal_swap_fields(other);
1410     }
1411 
internal_swap(concurrent_unordered_base & other,std::false_type)1412     void internal_swap( concurrent_unordered_base& other, /*is_always_equal || POCS = */std::false_type ) {
1413         __TBB_ASSERT(my_segments.get_allocator() == other.my_segments.get_allocator(),
1414                      "Swapping with unequal allocators is not allowed");
1415         internal_swap_fields(other);
1416     }
1417 
internal_swap_fields(concurrent_unordered_base & other)1418     void internal_swap_fields( concurrent_unordered_base& other ) {
1419         node_ptr first_node = my_head.next();
1420         my_head.set_next(other.my_head.next());
1421         other.my_head.set_next(first_node);
1422 
1423         size_type current_size = my_size.load(std::memory_order_relaxed);
1424         my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
1425         other.my_size.store(current_size, std::memory_order_relaxed);
1426 
1427         size_type bucket_count = my_bucket_count.load(std::memory_order_relaxed);
1428         my_bucket_count.store(other.my_bucket_count.load(std::memory_order_relaxed), std::memory_order_relaxed);
1429         other.my_bucket_count.store(bucket_count, std::memory_order_relaxed);
1430 
1431         using std::swap;
1432         swap(my_max_load_factor, other.my_max_load_factor);
1433         swap(my_hash_compare, other.my_hash_compare);
1434         my_segments.swap(other.my_segments);
1435 
1436         // swap() method from segment table swaps all of the segments including the first segment
1437         // We should restore it to my_head. Without it the first segment of the container will point
1438         // to other.my_head.
1439         my_segments[0].store(&my_head, std::memory_order_relaxed);
1440         other.my_segments[0].store(&other.my_head, std::memory_order_relaxed);
1441     }
1442 
1443     // A regular order key has its original hash value reversed and the last bit set
split_order_key_regular(sokey_type hash)1444     static constexpr sokey_type split_order_key_regular( sokey_type hash ) {
1445         return reverse_bits(hash) | 0x1;
1446     }
1447 
1448     // A dummy order key has its original hash value reversed and the last bit unset
split_order_key_dummy(sokey_type hash)1449     static constexpr sokey_type split_order_key_dummy( sokey_type hash ) {
1450         return reverse_bits(hash) & ~sokey_type(0x1);
1451     }
1452 
get_parent(size_type bucket)1453     size_type get_parent( size_type bucket ) const {
1454         // Unset bucket's most significant turned-on bit
1455         __TBB_ASSERT(bucket != 0, "Unable to get_parent of the bucket 0");
1456         size_type msb = tbb::detail::log2(bucket);
1457         return bucket & ~(size_type(1) << msb);
1458     }
1459 
get_next_bucket_index(size_type bucket)1460     size_type get_next_bucket_index( size_type bucket ) const {
1461         size_type bits = tbb::detail::log2(my_bucket_count.load(std::memory_order_relaxed));
1462         size_type reversed_next = reverse_n_bits(bucket, bits) + 1;
1463         return reverse_n_bits(reversed_next, bits);
1464     }
1465 
1466     std::atomic<size_type> my_size;
1467     std::atomic<size_type> my_bucket_count;
1468     float my_max_load_factor;
1469     hash_compare_type my_hash_compare;
1470 
1471     list_node_type my_head; // Head node for split ordered list
1472     unordered_segment_table my_segments; // Segment table of pointers to nodes
1473 
1474     template <typename Container, typename Value>
1475     friend class solist_iterator;
1476 
1477     template <typename OtherTraits>
1478     friend class concurrent_unordered_base;
1479 }; // class concurrent_unordered_base
1480 
1481 template <typename Traits>
1482 bool operator==( const concurrent_unordered_base<Traits>& lhs,
1483                  const concurrent_unordered_base<Traits>& rhs ) {
1484     if (&lhs == &rhs) { return true; }
1485     if (lhs.size() != rhs.size()) { return false; }
1486 
1487 #if _MSC_VER
1488     // Passing "unchecked" iterators to std::permutation with 3 parameters
1489     // causes compiler warnings.
1490     // The workaround is to use overload with 4 parameters, which is
1491     // available since C++14 - minimally supported version on MSVC
1492     return std::is_permutation(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
1493 #else
1494     return std::is_permutation(lhs.begin(), lhs.end(), rhs.begin());
1495 #endif
1496 }
1497 
1498 #if !__TBB_CPP20_COMPARISONS_PRESENT
1499 template <typename Traits>
1500 bool operator!=( const concurrent_unordered_base<Traits>& lhs,
1501                  const concurrent_unordered_base<Traits>& rhs ) {
1502     return !(lhs == rhs);
1503 }
1504 #endif
1505 
1506 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1507 #pragma warning(pop) // warning 4127 is back
1508 #endif
1509 
1510 } // namespace d1
1511 } // namespace detail
1512 } // namespace tbb
1513 
1514 #endif // __TBB_detail__concurrent_unordered_base_H
1515