1 /* 2 Copyright (c) 2019-2022 Intel Corporation 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 #ifndef __TBB_detail__concurrent_skip_list_H 18 #define __TBB_detail__concurrent_skip_list_H 19 20 #if !defined(__TBB_concurrent_map_H) && !defined(__TBB_concurrent_set_H) 21 #error Do not #include this internal file directly; use public TBB headers instead. 22 #endif 23 24 #include "_config.h" 25 #include "_range_common.h" 26 #include "_allocator_traits.h" 27 #include "_template_helpers.h" 28 #include "_node_handle.h" 29 #include "_containers_helpers.h" 30 #include "_assert.h" 31 #include "_exception.h" 32 #include "../enumerable_thread_specific.h" 33 #include <utility> 34 #include <initializer_list> 35 #include <atomic> 36 #include <array> 37 #include <type_traits> 38 #include <random> // Need std::geometric_distribution 39 #include <algorithm> // Need std::equal and std::lexicographical_compare 40 #include <cstdint> 41 #if __TBB_CPP20_COMPARISONS_PRESENT 42 #include <compare> 43 #endif 44 45 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) 46 #pragma warning(push) 47 #pragma warning(disable: 4127) // warning C4127: conditional expression is constant 48 #endif 49 50 namespace tbb { 51 namespace detail { 52 namespace d2 { 53 54 template <typename Value, typename Allocator> 55 class skip_list_node { 56 using node_ptr = skip_list_node*; 57 public: 58 using value_type = Value; 59 using atomic_node_ptr = std::atomic<node_ptr>; 60 using size_type = std::size_t; 61 using 
container_allocator_type = Allocator; 62 63 using reference = value_type&; 64 using const_reference = const value_type&; 65 private: 66 using allocator_traits = tbb::detail::allocator_traits<container_allocator_type>; 67 68 // Allocator is the same as the container allocator=> allocates unitptr_t 69 // It is required to rebind it to value_type to get the correct pointer and const_pointer 70 using value_allocator_traits = typename allocator_traits::template rebind_traits<value_type>; 71 public: 72 using pointer = typename value_allocator_traits::pointer; 73 using const_pointer = typename value_allocator_traits::const_pointer; 74 75 //In perfect world these constructor and destructor would have been private, 76 //however this seems technically impractical due to use of allocator_traits. 77 78 //Should not be called directly, instead use create method skip_list_node(size_type levels)79 skip_list_node( size_type levels ) 80 : my_height(levels), my_index_number(0) 81 {} 82 83 //Should not be called directly, instead use destroy method ~skip_list_node()84 ~skip_list_node() {} 85 86 skip_list_node( const skip_list_node& ) = delete; 87 skip_list_node( skip_list_node&& ) = delete; 88 skip_list_node& operator=( const skip_list_node& ) = delete; 89 skip_list_node& operator=( skip_list_node&& ) = delete; 90 create(container_allocator_type & alloc,size_type height)91 static skip_list_node* create( container_allocator_type& alloc, size_type height ) { 92 size_type sz = calc_node_size(height); 93 static_assert(std::is_same<typename allocator_traits::value_type, std::uint8_t>::value, "skip_list_node assumes that passed in allocator operates on bytes"); 94 auto* node = reinterpret_cast<skip_list_node*>(allocator_traits::allocate(alloc, sz)); 95 96 //Construct the node itself 97 allocator_traits::construct(alloc, node, height); 98 99 //Construct the level pointers 100 for (size_type l = 0; l < height; ++l) { 101 allocator_traits::construct(alloc, &node->get_atomic_next(l), nullptr); 
102 } 103 104 return node; 105 } 106 destroy(container_allocator_type & alloc,skip_list_node * node)107 static void destroy( container_allocator_type& alloc, skip_list_node* node ) { 108 //Destroy the level pointers 109 for (size_type l = 0; l < node->height(); ++l) { 110 allocator_traits::destroy(alloc, &node->atomic_next(l)); 111 } 112 size_type sz = calc_node_size(node->height()); 113 // Destroy the node itself 114 allocator_traits::destroy(alloc, node); 115 116 // Deallocate the node 117 allocator_traits::deallocate(alloc, reinterpret_cast<std::uint8_t*>(node), sz); 118 } 119 120 storage()121 pointer storage() { 122 return &my_value; 123 } 124 value()125 reference value() { 126 return *storage(); 127 } 128 next(size_type level)129 node_ptr next( size_type level ) const { 130 node_ptr res = get_atomic_next(level).load(std::memory_order_acquire); 131 __TBB_ASSERT(res == nullptr || res->height() > level, "Broken internal structure"); 132 return res; 133 } 134 atomic_next(size_type level)135 atomic_node_ptr& atomic_next( size_type level ) { 136 atomic_node_ptr& res = get_atomic_next(level); 137 #if TBB_USE_DEBUG 138 node_ptr node = res.load(std::memory_order_acquire); 139 __TBB_ASSERT(node == nullptr || node->height() > level, "Broken internal structure"); 140 #endif 141 return res; 142 } 143 set_next(size_type level,node_ptr n)144 void set_next( size_type level, node_ptr n ) { 145 __TBB_ASSERT(n == nullptr || n->height() > level, "Broken internal structure"); 146 get_atomic_next(level).store(n, std::memory_order_relaxed); 147 } 148 height()149 size_type height() const { 150 return my_height; 151 } 152 set_index_number(size_type index_num)153 void set_index_number( size_type index_num ) { 154 my_index_number = index_num; 155 } 156 index_number()157 size_type index_number() const { 158 return my_index_number; 159 } 160 161 private: calc_node_size(size_type height)162 static size_type calc_node_size( size_type height ) { 163 static_assert(alignof(skip_list_node) >= 
alignof(atomic_node_ptr), "Incorrect alignment"); 164 return sizeof(skip_list_node) + height * sizeof(atomic_node_ptr); 165 } 166 get_atomic_next(size_type level)167 atomic_node_ptr& get_atomic_next( size_type level ) { 168 atomic_node_ptr* arr = reinterpret_cast<atomic_node_ptr*>(this + 1); 169 return arr[level]; 170 } 171 get_atomic_next(size_type level)172 const atomic_node_ptr& get_atomic_next( size_type level ) const { 173 const atomic_node_ptr* arr = reinterpret_cast<const atomic_node_ptr*>(this + 1); 174 return arr[level]; 175 } 176 177 union { 178 value_type my_value; 179 }; 180 size_type my_height; 181 size_type my_index_number; 182 }; // class skip_list_node 183 184 template <typename NodeType, typename ValueType> 185 class skip_list_iterator { 186 using node_type = NodeType; 187 using node_ptr = node_type*; 188 public: 189 using iterator_category = std::forward_iterator_tag; 190 using value_type = ValueType; 191 192 using difference_type = std::ptrdiff_t; 193 using pointer = value_type*; 194 using reference = value_type&; 195 skip_list_iterator()196 skip_list_iterator() : skip_list_iterator(nullptr) {} 197 skip_list_iterator(const skip_list_iterator<node_type,typename node_type::value_type> & other)198 skip_list_iterator( const skip_list_iterator<node_type, typename node_type::value_type>& other ) 199 : my_node_ptr(other.my_node_ptr) {} 200 201 skip_list_iterator& operator=( const skip_list_iterator<node_type, typename node_type::value_type>& other ) { 202 my_node_ptr = other.my_node_ptr; 203 return *this; 204 } 205 206 reference operator*() const { return my_node_ptr->value(); } 207 pointer operator->() const { return my_node_ptr->storage(); } 208 209 skip_list_iterator& operator++() { 210 __TBB_ASSERT(my_node_ptr != nullptr, nullptr); 211 my_node_ptr = my_node_ptr->next(0); 212 return *this; 213 } 214 215 skip_list_iterator operator++(int) { 216 skip_list_iterator tmp = *this; 217 ++*this; 218 return tmp; 219 } 220 221 private: 
skip_list_iterator(node_type * n)222 skip_list_iterator(node_type* n) : my_node_ptr(n) {} 223 224 node_ptr my_node_ptr; 225 226 template <typename Traits> 227 friend class concurrent_skip_list; 228 229 template <typename N, typename V> 230 friend class skip_list_iterator; 231 232 friend class const_range; 233 friend class range; 234 235 friend bool operator==( const skip_list_iterator& lhs, const skip_list_iterator& rhs ) { 236 return lhs.my_node_ptr == rhs.my_node_ptr; 237 } 238 239 friend bool operator!=( const skip_list_iterator& lhs, const skip_list_iterator& rhs ) { 240 return lhs.my_node_ptr != rhs.my_node_ptr; 241 } 242 }; // class skip_list_iterator 243 244 template <typename Traits> 245 class concurrent_skip_list { 246 protected: 247 using container_traits = Traits; 248 using self_type = concurrent_skip_list<container_traits>; 249 using allocator_type = typename container_traits::allocator_type; 250 using allocator_traits_type = tbb::detail::allocator_traits<allocator_type>; 251 using key_compare = typename container_traits::compare_type; 252 using value_compare = typename container_traits::value_compare; 253 using key_type = typename container_traits::key_type; 254 using value_type = typename container_traits::value_type; 255 static_assert(std::is_same<value_type, typename allocator_type::value_type>::value, 256 "value_type of the container should be the same as its allocator"); 257 258 using size_type = std::size_t; 259 using difference_type = std::ptrdiff_t; 260 261 static constexpr size_type max_level = container_traits::max_level; 262 263 using node_allocator_type = typename allocator_traits_type::template rebind_alloc<std::uint8_t>; 264 using node_allocator_traits = tbb::detail::allocator_traits<node_allocator_type>; 265 266 using list_node_type = skip_list_node<value_type, node_allocator_type>; 267 using node_type = d1::node_handle<key_type, value_type, list_node_type, allocator_type>; 268 269 using iterator = skip_list_iterator<list_node_type, 
value_type>; 270 using const_iterator = skip_list_iterator<list_node_type, const value_type>; 271 272 using reference = value_type&; 273 using const_reference = const value_type&; 274 using pointer = typename allocator_traits_type::pointer; 275 using const_pointer = typename allocator_traits_type::const_pointer; 276 277 using random_level_generator_type = typename container_traits::random_level_generator_type; 278 279 using node_ptr = list_node_type*; 280 281 using array_type = std::array<node_ptr, max_level>; 282 private: 283 template <typename T> 284 using is_transparent = dependent_bool<comp_is_transparent<key_compare>, T>; 285 public: 286 static constexpr bool allow_multimapping = container_traits::allow_multimapping; 287 concurrent_skip_list()288 concurrent_skip_list() : my_head_ptr(nullptr), my_size(0), my_max_height(0) {} 289 290 explicit concurrent_skip_list( const key_compare& comp, const allocator_type& alloc = allocator_type() ) my_node_allocator(alloc)291 : my_node_allocator(alloc), my_compare(comp), my_head_ptr(nullptr), my_size(0), my_max_height(0) {} 292 concurrent_skip_list(const allocator_type & alloc)293 explicit concurrent_skip_list( const allocator_type& alloc ) 294 : concurrent_skip_list(key_compare(), alloc) {} 295 296 template<typename InputIterator> 297 concurrent_skip_list( InputIterator first, InputIterator last, const key_compare& comp = key_compare(), 298 const allocator_type& alloc = allocator_type() ) concurrent_skip_list(comp,alloc)299 : concurrent_skip_list(comp, alloc) 300 { 301 internal_copy(first, last); 302 } 303 304 template <typename InputIterator> concurrent_skip_list(InputIterator first,InputIterator last,const allocator_type & alloc)305 concurrent_skip_list( InputIterator first, InputIterator last, const allocator_type& alloc ) 306 : concurrent_skip_list(first, last, key_compare(), alloc) {} 307 308 concurrent_skip_list( std::initializer_list<value_type> init, const key_compare& comp = key_compare(), 309 const 
allocator_type& alloc = allocator_type() ) 310 : concurrent_skip_list(init.begin(), init.end(), comp, alloc) {} 311 concurrent_skip_list(std::initializer_list<value_type> init,const allocator_type & alloc)312 concurrent_skip_list( std::initializer_list<value_type> init, const allocator_type& alloc ) 313 : concurrent_skip_list(init, key_compare(), alloc) {} 314 concurrent_skip_list(const concurrent_skip_list & other)315 concurrent_skip_list( const concurrent_skip_list& other ) 316 : my_node_allocator(node_allocator_traits::select_on_container_copy_construction(other.get_allocator())), 317 my_compare(other.my_compare), my_rng(other.my_rng), my_head_ptr(nullptr), 318 my_size(0), my_max_height(0) 319 { 320 internal_copy(other); 321 __TBB_ASSERT(my_size == other.my_size, "Wrong size of copy-constructed container"); 322 } 323 concurrent_skip_list(const concurrent_skip_list & other,const allocator_type & alloc)324 concurrent_skip_list( const concurrent_skip_list& other, const allocator_type& alloc ) 325 : my_node_allocator(alloc), my_compare(other.my_compare), my_rng(other.my_rng), my_head_ptr(nullptr), 326 my_size(0), my_max_height(0) 327 { 328 internal_copy(other); 329 __TBB_ASSERT(my_size == other.my_size, "Wrong size of copy-constructed container"); 330 } 331 concurrent_skip_list(concurrent_skip_list && other)332 concurrent_skip_list( concurrent_skip_list&& other ) 333 : my_node_allocator(std::move(other.my_node_allocator)), my_compare(other.my_compare), 334 my_rng(std::move(other.my_rng)), my_head_ptr(nullptr) // my_head_ptr would be stored in internal_move 335 { 336 internal_move(std::move(other)); 337 } 338 concurrent_skip_list(concurrent_skip_list && other,const allocator_type & alloc)339 concurrent_skip_list( concurrent_skip_list&& other, const allocator_type& alloc ) 340 : my_node_allocator(alloc), my_compare(other.my_compare), 341 my_rng(std::move(other.my_rng)), my_head_ptr(nullptr) 342 { 343 using is_always_equal = typename 
allocator_traits_type::is_always_equal; 344 internal_move_construct_with_allocator(std::move(other), is_always_equal()); 345 } 346 ~concurrent_skip_list()347 ~concurrent_skip_list() { 348 clear(); 349 delete_head(); 350 } 351 352 concurrent_skip_list& operator=( const concurrent_skip_list& other ) { 353 if (this != &other) { 354 clear(); 355 copy_assign_allocators(my_node_allocator, other.my_node_allocator); 356 my_compare = other.my_compare; 357 my_rng = other.my_rng; 358 internal_copy(other); 359 } 360 return *this; 361 } 362 363 concurrent_skip_list& operator=( concurrent_skip_list&& other ) { 364 if (this != &other) { 365 clear(); 366 delete_head(); 367 368 my_compare = std::move(other.my_compare); 369 my_rng = std::move(other.my_rng); 370 371 move_assign_allocators(my_node_allocator, other.my_node_allocator); 372 using pocma_type = typename node_allocator_traits::propagate_on_container_move_assignment; 373 using is_always_equal = typename node_allocator_traits::is_always_equal; 374 internal_move_assign(std::move(other), tbb::detail::disjunction<pocma_type, is_always_equal>()); 375 } 376 return *this; 377 } 378 379 concurrent_skip_list& operator=( std::initializer_list<value_type> il ) 380 { 381 clear(); 382 insert(il.begin(),il.end()); 383 return *this; 384 } 385 insert(const value_type & value)386 std::pair<iterator, bool> insert( const value_type& value ) { 387 return internal_insert(value); 388 } 389 insert(value_type && value)390 std::pair<iterator, bool> insert( value_type&& value ) { 391 return internal_insert(std::move(value)); 392 } 393 insert(const_iterator,const_reference value)394 iterator insert( const_iterator, const_reference value ) { 395 // Ignore hint 396 return insert(value).first; 397 } 398 insert(const_iterator,value_type && value)399 iterator insert( const_iterator, value_type&& value ) { 400 // Ignore hint 401 return insert(std::move(value)).first; 402 } 403 404 template<typename InputIterator> insert(InputIterator first,InputIterator 
last)405 void insert( InputIterator first, InputIterator last ) { 406 while (first != last) { 407 insert(*first); 408 ++first; 409 } 410 } 411 insert(std::initializer_list<value_type> init)412 void insert( std::initializer_list<value_type> init ) { 413 insert(init.begin(), init.end()); 414 } 415 insert(node_type && nh)416 std::pair<iterator, bool> insert( node_type&& nh ) { 417 if (!nh.empty()) { 418 auto insert_node = d1::node_handle_accessor::get_node_ptr(nh); 419 std::pair<iterator, bool> insert_result = internal_insert_node(insert_node); 420 if (insert_result.second) { 421 d1::node_handle_accessor::deactivate(nh); 422 } 423 return insert_result; 424 } 425 return std::pair<iterator, bool>(end(), false); 426 } 427 insert(const_iterator,node_type && nh)428 iterator insert( const_iterator, node_type&& nh ) { 429 // Ignore hint 430 return insert(std::move(nh)).first; 431 } 432 433 template<typename... Args> emplace(Args &&...args)434 std::pair<iterator, bool> emplace( Args&&... args ) { 435 return internal_insert(std::forward<Args>(args)...); 436 } 437 438 template<typename... Args> emplace_hint(const_iterator,Args &&...args)439 iterator emplace_hint( const_iterator, Args&&... 
args ) { 440 // Ignore hint 441 return emplace(std::forward<Args>(args)...).first; 442 } 443 unsafe_erase(iterator pos)444 iterator unsafe_erase( iterator pos ) { 445 std::pair<node_ptr, node_ptr> extract_result = internal_extract(pos); 446 if (extract_result.first) { // node was extracted 447 delete_value_node(extract_result.first); 448 return extract_result.second; 449 } 450 return end(); 451 } 452 unsafe_erase(const_iterator pos)453 iterator unsafe_erase( const_iterator pos ) { 454 return unsafe_erase(get_iterator(pos)); 455 } 456 unsafe_erase(const_iterator first,const_iterator last)457 iterator unsafe_erase( const_iterator first, const_iterator last ) { 458 while (first != last) { 459 // Unsafe erase returns the iterator which follows the erased one 460 first = unsafe_erase(first); 461 } 462 return get_iterator(first); 463 } 464 unsafe_erase(const key_type & key)465 size_type unsafe_erase( const key_type& key ) { 466 return internal_erase(key); 467 } 468 469 template <typename K> 470 typename std::enable_if<is_transparent<K>::value 471 && !std::is_convertible<K, const_iterator>::value 472 && !std::is_convertible<K, iterator>::value, unsafe_erase(const K & key)473 size_type>::type unsafe_erase( const K& key ) 474 { 475 return internal_erase(key); 476 } 477 unsafe_extract(const_iterator pos)478 node_type unsafe_extract( const_iterator pos ) { 479 std::pair<node_ptr, node_ptr> extract_result = internal_extract(pos); 480 return extract_result.first ? 
d1::node_handle_accessor::construct<node_type>(extract_result.first) : node_type(); 481 } 482 unsafe_extract(iterator pos)483 node_type unsafe_extract( iterator pos ) { 484 return unsafe_extract(const_iterator(pos)); 485 } 486 unsafe_extract(const key_type & key)487 node_type unsafe_extract( const key_type& key ) { 488 return unsafe_extract(find(key)); 489 } 490 491 template <typename K> 492 typename std::enable_if<is_transparent<K>::value 493 && !std::is_convertible<K, const_iterator>::value 494 && !std::is_convertible<K, iterator>::value, unsafe_extract(const K & key)495 node_type>::type unsafe_extract( const K& key ) 496 { 497 return unsafe_extract(find(key)); 498 } 499 lower_bound(const key_type & key)500 iterator lower_bound( const key_type& key ) { 501 return iterator(internal_get_bound(key, my_compare)); 502 } 503 lower_bound(const key_type & key)504 const_iterator lower_bound( const key_type& key ) const { 505 return const_iterator(internal_get_bound(key, my_compare)); 506 } 507 508 template <typename K> lower_bound(const K & key)509 typename std::enable_if<is_transparent<K>::value, iterator>::type lower_bound( const K& key ) { 510 return iterator(internal_get_bound(key, my_compare)); 511 } 512 513 template <typename K> lower_bound(const K & key)514 typename std::enable_if<is_transparent<K>::value, const_iterator>::type lower_bound( const K& key ) const { 515 return const_iterator(internal_get_bound(key, my_compare)); 516 } 517 upper_bound(const key_type & key)518 iterator upper_bound( const key_type& key ) { 519 return iterator(internal_get_bound(key, not_greater_compare(my_compare))); 520 } 521 upper_bound(const key_type & key)522 const_iterator upper_bound( const key_type& key ) const { 523 return const_iterator(internal_get_bound(key, not_greater_compare(my_compare))); 524 } 525 526 template <typename K> upper_bound(const K & key)527 typename std::enable_if<is_transparent<K>::value, iterator>::type upper_bound( const K& key ) { 528 return 
iterator(internal_get_bound(key, not_greater_compare(my_compare))); 529 } 530 531 template <typename K> upper_bound(const K & key)532 typename std::enable_if<is_transparent<K>::value, const_iterator>::type upper_bound( const K& key ) const { 533 return const_iterator(internal_get_bound(key, not_greater_compare(my_compare))); 534 } 535 find(const key_type & key)536 iterator find( const key_type& key ) { 537 return iterator(internal_find(key)); 538 } 539 find(const key_type & key)540 const_iterator find( const key_type& key ) const { 541 return const_iterator(internal_find(key)); 542 } 543 544 template <typename K> find(const K & key)545 typename std::enable_if<is_transparent<K>::value, iterator>::type find( const K& key ) { 546 return iterator(internal_find(key)); 547 } 548 549 template <typename K> find(const K & key)550 typename std::enable_if<is_transparent<K>::value, const_iterator>::type find( const K& key ) const { 551 return const_iterator(internal_find(key)); 552 } 553 count(const key_type & key)554 size_type count( const key_type& key ) const { 555 return internal_count(key); 556 } 557 558 template <typename K> count(const K & key)559 typename std::enable_if<is_transparent<K>::value, size_type>::type count( const K& key ) const { 560 return internal_count(key); 561 } 562 contains(const key_type & key)563 bool contains( const key_type& key ) const { 564 return find(key) != end(); 565 } 566 567 template <typename K> contains(const K & key)568 typename std::enable_if<is_transparent<K>::value, bool>::type contains( const K& key ) const { 569 return find(key) != end(); 570 } 571 clear()572 void clear() noexcept { 573 // clear is not thread safe - load can be relaxed 574 node_ptr head = my_head_ptr.load(std::memory_order_relaxed); 575 576 if (head == nullptr) return; // Head is not allocated => container is empty 577 578 node_ptr current = head->next(0); 579 580 // Delete all value nodes in the container 581 while (current) { 582 node_ptr next = current->next(0); 
583 delete_value_node(current); 584 current = next; 585 } 586 587 for (size_type level = 0; level < head->height(); ++level) { 588 head->set_next(level, nullptr); 589 } 590 591 my_size.store(0, std::memory_order_relaxed); 592 my_max_height.store(0, std::memory_order_relaxed); 593 } 594 begin()595 iterator begin() { 596 return iterator(internal_begin()); 597 } 598 begin()599 const_iterator begin() const { 600 return const_iterator(internal_begin()); 601 } 602 cbegin()603 const_iterator cbegin() const { 604 return const_iterator(internal_begin()); 605 } 606 end()607 iterator end() { 608 return iterator(nullptr); 609 } 610 end()611 const_iterator end() const { 612 return const_iterator(nullptr); 613 } 614 cend()615 const_iterator cend() const { 616 return const_iterator(nullptr); 617 } 618 size()619 size_type size() const { 620 return my_size.load(std::memory_order_relaxed); 621 } 622 max_size()623 size_type max_size() const { 624 return node_allocator_traits::max_size(my_node_allocator); 625 } 626 empty()627 __TBB_nodiscard bool empty() const { 628 return 0 == size(); 629 } 630 get_allocator()631 allocator_type get_allocator() const { 632 return my_node_allocator; 633 } 634 swap(concurrent_skip_list & other)635 void swap(concurrent_skip_list& other) { 636 if (this != &other) { 637 using pocs_type = typename node_allocator_traits::propagate_on_container_swap; 638 using is_always_equal = typename node_allocator_traits::is_always_equal; 639 internal_swap(other, tbb::detail::disjunction<pocs_type, is_always_equal>()); 640 } 641 } 642 equal_range(const key_type & key)643 std::pair<iterator, iterator> equal_range(const key_type& key) { 644 return internal_equal_range(key); 645 } 646 equal_range(const key_type & key)647 std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const { 648 return internal_equal_range(key); 649 } 650 651 template <typename K> equal_range(const K & key)652 typename std::enable_if<is_transparent<K>::value, std::pair<iterator, 
iterator>>::type equal_range( const K& key ) { 653 return internal_equal_range(key); 654 } 655 656 template <typename K> equal_range(const K & key)657 typename std::enable_if<is_transparent<K>::value, std::pair<const_iterator, const_iterator>>::type equal_range( const K& key ) const { 658 return internal_equal_range(key); 659 } 660 key_comp()661 key_compare key_comp() const { return my_compare; } 662 value_comp()663 value_compare value_comp() const { return container_traits::value_comp(my_compare); } 664 665 class const_range_type { 666 public: 667 using size_type = typename concurrent_skip_list::size_type; 668 using difference_type = typename concurrent_skip_list::difference_type; 669 using iterator = typename concurrent_skip_list::const_iterator; 670 using value_type = typename iterator::value_type; 671 using reference = typename iterator::reference; 672 empty()673 bool empty() const { 674 return my_begin.my_node_ptr ? (my_begin.my_node_ptr->next(0) == my_end.my_node_ptr) 675 : true; 676 } 677 is_divisible()678 bool is_divisible() const { 679 return my_begin.my_node_ptr && my_level != 0 680 ? my_begin.my_node_ptr->next(my_level - 1) != my_end.my_node_ptr 681 : false; 682 } 683 size()684 size_type size() const { return std::distance(my_begin, my_end); } 685 const_range_type(const_range_type & r,split)686 const_range_type( const_range_type& r, split) 687 : my_end(r.my_end) { 688 if (r.empty()) { 689 __TBB_ASSERT(my_end.my_node_ptr == nullptr, nullptr); 690 my_begin = my_end; 691 my_level = 0; 692 } else { 693 my_begin = iterator(r.my_begin.my_node_ptr->next(r.my_level - 1)); 694 my_level = my_begin.my_node_ptr->height(); 695 } 696 r.my_end = my_begin; 697 } 698 const_range_type(const concurrent_skip_list & l)699 const_range_type( const concurrent_skip_list& l) 700 : my_end(l.end()), my_begin(l.begin()), 701 my_level(my_begin.my_node_ptr ? 
my_begin.my_node_ptr->height() : 0) {} 702 begin()703 iterator begin() const { return my_begin; } end()704 iterator end() const { return my_end; } grainsize()705 size_type grainsize() const { return 1; } 706 707 private: 708 const_iterator my_end; 709 const_iterator my_begin; 710 size_type my_level; 711 }; // class const_range_type 712 713 class range_type : public const_range_type { 714 public: 715 using iterator = typename concurrent_skip_list::iterator; 716 using value_type = typename iterator::value_type; 717 using reference = typename iterator::reference; 718 range_type(range_type & r,split)719 range_type(range_type& r, split) : const_range_type(r, split()) {} range_type(const concurrent_skip_list & l)720 range_type(const concurrent_skip_list& l) : const_range_type(l) {} 721 begin()722 iterator begin() const { 723 node_ptr node = const_range_type::begin().my_node_ptr; 724 return iterator(node); 725 } 726 end()727 iterator end() const { 728 node_ptr node = const_range_type::end().my_node_ptr; 729 return iterator(node); 730 } 731 }; // class range_type 732 range()733 range_type range() { return range_type(*this); } range()734 const_range_type range() const { return const_range_type(*this); } 735 736 private: internal_begin()737 node_ptr internal_begin() const { 738 node_ptr head = get_head(); 739 return head == nullptr ? 
head : head->next(0); 740 } 741 internal_move(concurrent_skip_list && other)742 void internal_move(concurrent_skip_list&& other) { 743 my_head_ptr.store(other.my_head_ptr.load(std::memory_order_relaxed), std::memory_order_relaxed); 744 other.my_head_ptr.store(nullptr, std::memory_order_relaxed); 745 746 my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed); 747 other.my_size.store(0, std::memory_order_relaxed); 748 749 my_max_height.store(other.my_max_height.load(std::memory_order_relaxed), std::memory_order_relaxed); 750 other.my_max_height.store(0, std::memory_order_relaxed); 751 } 752 internal_move_construct_with_allocator(concurrent_skip_list && other,std::true_type)753 void internal_move_construct_with_allocator(concurrent_skip_list&& other, 754 /*is_always_equal = */std::true_type) { 755 internal_move(std::move(other)); 756 } 757 internal_move_construct_with_allocator(concurrent_skip_list && other,std::false_type)758 void internal_move_construct_with_allocator(concurrent_skip_list&& other, 759 /*is_always_equal = */std::false_type) { 760 if (my_node_allocator == other.get_allocator()) { 761 internal_move(std::move(other)); 762 } else { 763 my_size.store(0, std::memory_order_relaxed); 764 my_max_height.store(other.my_max_height.load(std::memory_order_relaxed), std::memory_order_relaxed); 765 internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end())); 766 } 767 } 768 get_key(node_ptr n)769 static const key_type& get_key( node_ptr n ) { 770 __TBB_ASSERT(n, nullptr); 771 return container_traits::get_key(static_cast<node_ptr>(n)->value()); 772 } 773 774 template <typename K> found(node_ptr node,const K & key)775 bool found( node_ptr node, const K& key ) const { 776 return node != nullptr && !my_compare(key, get_key(node)); 777 } 778 779 template <typename K> internal_find(const K & key)780 node_ptr internal_find(const K& key) const { 781 return allow_multimapping ? 
    // (tail of internal_find, whose head is above this chunk):
    // dispatches to the multi-key or unique-key lookup.
                                 internal_find_multi(key) : internal_find_unique(key);
    }

    // Lookup for containers that allow equal keys. Walks the levels from the
    // current maximum height down to level 0; on each level advances via
    // internal_find_position and checks the reached candidate with found().
    // Returns the first matching node, or nullptr if the key is absent.
    template <typename K>
    node_ptr internal_find_multi( const K& key ) const {
        node_ptr prev = get_head();
        if (prev == nullptr) return nullptr; // If the head node is not allocated - exit

        node_ptr curr = nullptr;
        node_ptr old_curr = curr; // last candidate already rejected; avoids re-testing it

        for (size_type h = my_max_height.load(std::memory_order_acquire); h > 0; --h) {
            curr = internal_find_position(h - 1, prev, key, my_compare);

            if (curr != old_curr && found(curr, key)) {
                return curr;
            }
            old_curr = curr;
        }
        return nullptr;
    }

    // Lookup for unique containers: take the lower bound and verify it is not
    // strictly greater than the key (i.e. it is an exact match).
    template <typename K>
    node_ptr internal_find_unique( const K& key ) const {
        const_iterator it = lower_bound(key);
        return (it == end() || my_compare(key, container_traits::get_key(*it))) ? nullptr : it.my_node_ptr;
    }

    // Number of elements equivalent to key: range size for multi-containers,
    // 0/1 for unique ones.
    template <typename K>
    size_type internal_count( const K& key ) const {
        if (allow_multimapping) {
            // TODO: reimplement without double traversal
            std::pair<const_iterator, const_iterator> r = equal_range(key);
            return std::distance(r.first, r.second);
        }
        return size_type(contains(key) ? 1 : 0);
    }

    // Returns [lower_bound, upper_bound) for key. For multi-containers the
    // upper bound is found by continuing the skip-list walk from the lower
    // bound with the "not greater" comparator instead of a second full search.
    template <typename K>
    std::pair<iterator, iterator> internal_equal_range(const K& key) const {
        iterator lb = get_iterator(lower_bound(key));
        auto result = std::make_pair(lb, lb);

        // If the lower bound points to the node with the requested key
        if (found(lb.my_node_ptr, key)) {

            if (!allow_multimapping) {
                // For unique containers - move the second iterator forward and exit
                ++result.second;
            } else {
                // For multi containers - find the upper bound starting from the lower bound
                node_ptr prev = lb.my_node_ptr;
                node_ptr curr = nullptr;
                not_greater_compare cmp(my_compare);

                // Start from the lower bound of the range
                for (size_type h = prev->height(); h > 0; --h) {
                    curr = prev->next(h - 1);
                    while (curr && cmp(get_key(curr), key)) {
                        prev = curr;
                        // If the height of the next node is greater than the current one - jump to its height
                        if (h < curr->height()) {
                            h = curr->height();
                        }
                        curr = prev->next(h - 1);
                    }
                }
                result.second = iterator(curr);
            }
        }

        return result;
    }

    // Finds position on the level using comparator cmp starting from the node prev.
    // On return prev is the last node on `level` for which cmp(key_of(prev), key)
    // held; the returned node is its successor (possibly nullptr).
    template <typename K, typename Comparator>
    node_ptr internal_find_position( size_type level, node_ptr& prev, const K& key,
                                     const Comparator& cmp ) const {
        __TBB_ASSERT(level < prev->height(), "Wrong level to find position");
        node_ptr curr = prev->next(level);

        while (curr && cmp(get_key(curr), key)) {
            prev = curr;
            __TBB_ASSERT(level < prev->height(), nullptr);
            curr = prev->next(level);
        }

        return curr;
    }

    // The same as previous overload, but allows index_number comparison:
    // among equal keys (multi-containers), nodes are ordered by their
    // insertion index, so the scan stops before an equal-keyed node with a
    // larger index_number than the node being positioned.
    template <typename Comparator>
    node_ptr internal_find_position( size_type level, node_ptr& prev, node_ptr node,
                                     const Comparator& cmp ) const {
        __TBB_ASSERT(level < prev->height(), "Wrong level to find position");
        node_ptr curr = prev->next(level);

        while (curr && cmp(get_key(curr), get_key(node))) {
            if (allow_multimapping && cmp(get_key(node), get_key(curr)) && curr->index_number() > node->index_number()) {
                break;
            }

            prev = curr;
            __TBB_ASSERT(level < prev->height(), nullptr);
            curr = prev->next(level);
        }
        return curr;
    }

    // Fills per-level predecessor/successor arrays for inserting `node` with
    // key `key`. Levels above the current maximum height are seeded with
    // head/nullptr since no existing node reaches them.
    template <typename Comparator>
    void fill_prev_curr_arrays( array_type& prev_nodes, array_type& curr_nodes, node_ptr node, const key_type& key,
                                const Comparator& cmp, node_ptr head ) {

        size_type curr_max_height = my_max_height.load(std::memory_order_acquire);
        size_type node_height = node->height();
        if (curr_max_height < node_height) {
            std::fill(prev_nodes.begin() + curr_max_height, prev_nodes.begin() + node_height, head);
            std::fill(curr_nodes.begin() + curr_max_height, curr_nodes.begin() + node_height, nullptr);
        }

        node_ptr prev = head;
        for (size_type level = curr_max_height; level > 0; --level) {
            node_ptr curr = internal_find_position(level - 1, prev, key, cmp);
            prev_nodes[level - 1] = prev;
            curr_nodes[level - 1] = curr;
        }
    }

    // Fills the predecessor array for a node already linked into the list by
    // scanning each of its levels until the node itself is reached.
    // Used by extraction/unlinking.
    void fill_prev_array_for_existing_node( array_type& prev_nodes, node_ptr node ) {
        node_ptr head = create_head_if_necessary();
        prev_nodes.fill(head);

        node_ptr prev = head;
        for (size_type level = node->height(); level > 0; --level) {
            while (prev->next(level - 1) != node) {
                prev = prev->next(level - 1);
            }
            prev_nodes[level - 1] = prev;
        }
    }

    // Negation of the stored strict-weak-order: cmp(a, b) == !less(b, a),
    // i.e. "a is not greater than b". Used so multi-containers position new
    // equal keys after the existing ones.
    struct not_greater_compare {
        const key_compare& my_less_compare;

        not_greater_compare( const key_compare& less_compare ) : my_less_compare(less_compare) {}

        template <typename K1, typename K2>
        bool operator()( const K1& first, const K2& second ) const {
            return !my_less_compare(second, first);
        }
    };

    // Tag-dispatched comparator choice: multi-containers scan with
    // not_greater_compare, unique containers with the plain key_compare.
    not_greater_compare select_comparator( /*allow_multimapping = */ std::true_type ) {
        return not_greater_compare(my_compare);
    }

    key_compare select_comparator( /*allow_multimapping = */ std::false_type ) {
        return my_compare;
    }

    // Constructs a value node from args and tries to link it; the node is
    // destroyed if insertion reports a duplicate (unique containers).
    template<typename... Args>
    std::pair<iterator, bool> internal_insert( Args&&... args ) {
        node_ptr new_node = create_value_node(std::forward<Args>(args)...);
        std::pair<iterator, bool> insert_result = internal_insert_node(new_node);
        if (!insert_result.second) {
            delete_value_node(new_node);
        }
        return insert_result;
    }

    // Lock-free insertion. The node is first linked on level 0 with a CAS;
    // once that succeeds the insertion is committed and the remaining levels
    // are linked (retrying with refreshed positions on CAS failure).
    std::pair<iterator, bool> internal_insert_node( node_ptr new_node ) {
        array_type prev_nodes;
        array_type curr_nodes;
        size_type new_height = new_node->height();
        auto compare = select_comparator(std::integral_constant<bool, allow_multimapping>{});

        node_ptr head_node = create_head_if_necessary();

        for (;;) {
            fill_prev_curr_arrays(prev_nodes, curr_nodes, new_node, get_key(new_node), compare, head_node);

            node_ptr prev = prev_nodes[0];
            node_ptr next = curr_nodes[0];

            if (allow_multimapping) {
                // Order equal keys by insertion sequence relative to the predecessor
                new_node->set_index_number(prev->index_number() + 1);
            } else {
                if (found(next, get_key(new_node))) {
                    // Duplicate key in a unique container - report failure
                    return std::pair<iterator, bool>(iterator(next), false);
                }
            }

            new_node->set_next(0, next);
            if (!prev->atomic_next(0).compare_exchange_strong(next, new_node)) {
                // Another thread changed the level-0 link - recompute positions and retry
                continue;
            }

            // If the node was successfully linked on the first level - it will be linked on other levels
            // Insertion cannot fail starting from this point

            // If the height of inserted node is greater than maximum - increase maximum
            size_type max_height = my_max_height.load(std::memory_order_acquire);
            for (;;) {
                if (new_height <= max_height || my_max_height.compare_exchange_strong(max_height, new_height)) {
                    // If the maximum was successfully updated by current thread
                    // or by an other thread for the value, greater or equal to new_height
                    break;
                }
            }

            for (std::size_t level = 1; level < new_height; ++level) {
                // Link the node on upper levels
                for (;;) {
                    prev = prev_nodes[level];
                    next = static_cast<node_ptr>(curr_nodes[level]);

                    new_node->set_next(level, next);
                    __TBB_ASSERT(new_node->height() > level, "Internal structure break");
                    if (prev->atomic_next(level).compare_exchange_strong(next, new_node)) {
                        break;
                    }

                    // CAS lost the race on this level - refresh positions for it
                    // and all levels above before retrying
                    for (size_type lev = level; lev != new_height; ++lev) {
                        curr_nodes[lev] = internal_find_position(lev, prev_nodes[lev], new_node, compare);
                    }
                }
            }
            ++my_size;
            return std::pair<iterator, bool>(iterator(new_node), true);
        }
    }

    // Shared walk for lower_bound/upper_bound: the comparator decides which
    // bound is produced. Returns the first node NOT passed by cmp, or nullptr.
    template <typename K, typename Comparator>
    node_ptr internal_get_bound( const K& key, const Comparator& cmp ) const {
        node_ptr prev = get_head();
        if (prev == nullptr) return nullptr; // If the head node is not allocated - exit

        node_ptr curr = nullptr;

        for (size_type h = my_max_height.load(std::memory_order_acquire); h > 0; --h) {
            curr = internal_find_position(h - 1, prev, key, cmp);
        }

        return curr;
    }

    // Erases all elements equivalent to key; returns the number erased
    // (computed as the size delta around unsafe_erase).
    template <typename K>
    size_type internal_erase( const K& key ) {
        auto eq = equal_range(key);
        size_type old_size = size();
        unsafe_erase(eq.first, eq.second);
        return old_size - size();
    }

    // Returns node_ptr to the extracted node and node_ptr to the next node after the extracted.
    // NOTE(review): unlinking uses plain set_next on every level, so this
    // presumably must not run concurrently with other operations on the
    // container ("unsafe" family) - confirm against the public API contract.
    std::pair<node_ptr, node_ptr> internal_extract( const_iterator it ) {
        std::pair<node_ptr, node_ptr> result(nullptr, nullptr);
        if ( it != end() ) {
            array_type prev_nodes;

            node_ptr erase_node = it.my_node_ptr;
            node_ptr next_node = erase_node->next(0);
            fill_prev_array_for_existing_node(prev_nodes, erase_node);

            // Unlink the node on every level it participates in
            for (size_type level = 0; level < erase_node->height(); ++level) {
                prev_nodes[level]->set_next(level, erase_node->next(level));
                erase_node->set_next(level, nullptr);
            }
            my_size.fetch_sub(1, std::memory_order_relaxed);

            result.first = erase_node;
            result.second = next_node;
        }
        return result;
    }

protected:
    // Moves elements of source into *this via node handles. Elements whose
    // insertion fails (duplicate key in a unique container) are returned to
    // source, so no element is ever lost.
    template<typename SourceType>
    void internal_merge( SourceType&& source ) {
        using source_type = typename std::decay<SourceType>::type;
        using source_iterator = typename source_type::iterator;
        static_assert((std::is_same<node_type, typename source_type::node_type>::value), "Incompatible containers cannot be merged");

        for (source_iterator it = source.begin(); it != source.end();) {
            source_iterator where = it++; // advance before extraction invalidates `where`
            if (allow_multimapping || !contains(container_traits::get_key(*where))) {
                node_type handle = source.unsafe_extract(where);
                __TBB_ASSERT(!handle.empty(), "Extracted handle in merge is empty");

                if (!insert(std::move(handle)).second) {
                    __TBB_ASSERT(!handle.empty(), "Handle should not be empty if insert fails");
                    // If the insertion fails - return the node into source
                    source.insert(std::move(handle));
                }
                __TBB_ASSERT(handle.empty(), "Node handle should be empty after the insertion");
            }
        }
    }

private:
internal_copy(const concurrent_skip_list & other)1081 void internal_copy( const concurrent_skip_list& other ) { 1082 internal_copy(other.begin(), other.end()); 1083 } 1084 1085 template<typename Iterator> internal_copy(Iterator first,Iterator last)1086 void internal_copy( Iterator first, Iterator last ) { 1087 try_call([&] { 1088 for (auto it = first; it != last; ++it) { 1089 insert(*it); 1090 } 1091 }).on_exception([&] { 1092 clear(); 1093 delete_head(); 1094 }); 1095 } 1096 create_node(size_type height)1097 node_ptr create_node( size_type height ) { 1098 return list_node_type::create(my_node_allocator, height); 1099 } 1100 1101 template <typename... Args> create_value_node(Args &&...args)1102 node_ptr create_value_node( Args&&... args ) { 1103 node_ptr node = create_node(my_rng()); 1104 1105 // try_call API is not convenient here due to broken 1106 // variadic capture on GCC 4.8.5 1107 auto value_guard = make_raii_guard([&] { 1108 delete_node(node); 1109 }); 1110 1111 // Construct the value inside the node 1112 node_allocator_traits::construct(my_node_allocator, node->storage(), std::forward<Args>(args)...); 1113 value_guard.dismiss(); 1114 return node; 1115 } 1116 create_head_node()1117 node_ptr create_head_node() { 1118 return create_node(max_level); 1119 } 1120 delete_head()1121 void delete_head() { 1122 node_ptr head = my_head_ptr.load(std::memory_order_relaxed); 1123 if (head != nullptr) { 1124 delete_node(head); 1125 my_head_ptr.store(nullptr, std::memory_order_relaxed); 1126 } 1127 } 1128 delete_node(node_ptr node)1129 void delete_node( node_ptr node ) { 1130 list_node_type::destroy(my_node_allocator, node); 1131 } 1132 delete_value_node(node_ptr node)1133 void delete_value_node( node_ptr node ) { 1134 // Destroy the value inside the node 1135 node_allocator_traits::destroy(my_node_allocator, node->storage()); 1136 delete_node(node); 1137 } 1138 get_head()1139 node_ptr get_head() const { 1140 return my_head_ptr.load(std::memory_order_acquire); 1141 } 1142 
    // Lazily allocates and publishes the head node. Concurrent callers race
    // with a CAS on my_head_ptr; the loser deletes its speculative node and
    // adopts the winner's head. Always returns a non-null head.
    node_ptr create_head_if_necessary() {
        node_ptr current_head = get_head();
        if (current_head == nullptr) {
            // Head node was not created - create it
            node_ptr new_head = create_head_node();
            if (my_head_ptr.compare_exchange_strong(current_head, new_head)) {
                current_head = new_head;
            } else {
                // If an other thread has already created the head node - destroy new_head
                // current_head now points to the actual head node
                delete_node(new_head);
            }
        }
        __TBB_ASSERT(my_head_ptr.load(std::memory_order_relaxed) != nullptr, nullptr);
        __TBB_ASSERT(current_head != nullptr, nullptr);
        return current_head;
    }

    // Converts a const_iterator into a mutable iterator over the same node.
    static iterator get_iterator( const_iterator it ) {
        return iterator(it.my_node_ptr);
    }

    // Move assignment, tag-dispatched on propagate_on_container_move_assignment
    // or allocator equality: when either holds, the whole structure can be stolen.
    void internal_move_assign( concurrent_skip_list&& other, /*POCMA || is_always_equal =*/std::true_type ) {
        internal_move(std::move(other));
    }

    // Otherwise steal only when the allocators actually compare equal;
    // with unequal allocators each element must be moved individually.
    void internal_move_assign( concurrent_skip_list&& other, /*POCMA || is_always_equal =*/std::false_type ) {
        if (my_node_allocator == other.my_node_allocator) {
            internal_move(std::move(other));
        } else {
            internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()));
        }
    }

    // Swaps every data member; atomics are exchanged with relaxed ordering
    // (swap is not a concurrency-safe operation).
    void internal_swap_fields( concurrent_skip_list& other ) {
        using std::swap; // enable ADL for user-provided swap overloads
        swap_allocators(my_node_allocator, other.my_node_allocator);
        swap(my_compare, other.my_compare);
        swap(my_rng, other.my_rng);

        swap_atomics_relaxed(my_head_ptr, other.my_head_ptr);
        swap_atomics_relaxed(my_size, other.my_size);
        swap_atomics_relaxed(my_max_height, other.my_max_height);
    }

    // Swap, tag-dispatched on propagate_on_container_swap || is_always_equal.
    void internal_swap( concurrent_skip_list& other, /*POCMA || is_always_equal =*/std::true_type ) {
        internal_swap_fields(other);
    }

    void internal_swap( concurrent_skip_list& other, /*POCMA || is_always_equal =*/std::false_type ) {
        __TBB_ASSERT(my_node_allocator == other.my_node_allocator, "Swapping with unequal allocators is not allowed");
        internal_swap_fields(other);
    }

    node_allocator_type my_node_allocator;       // rebound allocator for list nodes
    key_compare my_compare;                      // strict weak ordering on keys
    random_level_generator_type my_rng;          // produces tower heights for new nodes
    std::atomic<list_node_type*> my_head_ptr;    // lazily created dummy head node
    std::atomic<size_type> my_size;              // element count
    std::atomic<size_type> my_max_height;        // tallest tower currently linked

    template<typename OtherTraits>
    friend class concurrent_skip_list;
}; // class concurrent_skip_list

// Element-wise equality: equal sizes and pairwise-equal elements in order.
template <typename Traits>
bool operator==( const concurrent_skip_list<Traits>& lhs, const concurrent_skip_list<Traits>& rhs ) {
    if (lhs.size() != rhs.size()) return false;
#if _MSC_VER
    // Passing "unchecked" iterators to std::equal with 3 parameters
    // causes compiler warnings.
    // The workaround is to use overload with 4 parameters, which is
    // available since C++14 - minimally supported version on MSVC
    return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
#else
    return std::equal(lhs.begin(), lhs.end(), rhs.begin());
#endif
}

#if !__TBB_CPP20_COMPARISONS_PRESENT
// operator!= is only needed pre-C++20; with <=> available it is synthesized.
template <typename Traits>
bool operator!=( const concurrent_skip_list<Traits>& lhs, const concurrent_skip_list<Traits>& rhs ) {
    return !(lhs == rhs);
}
#endif

#if __TBB_CPP20_COMPARISONS_PRESENT && __TBB_CPP20_CONCEPTS_PRESENT
// C++20 path: a single three-way comparison replaces the four relational operators.
template <typename Traits>
tbb::detail::synthesized_three_way_result<typename Traits::value_type>
operator<=>( const concurrent_skip_list<Traits>& lhs, const concurrent_skip_list<Traits>& rhs ) {
    return std::lexicographical_compare_three_way(lhs.begin(), lhs.end(),
                                                  rhs.begin(), rhs.end(),
                                                  tbb::detail::synthesized_three_way_comparator{});
}
#else
// Pre-C++20 path: relational operators defined in terms of lexicographical <.
template <typename Traits>
bool operator<( const concurrent_skip_list<Traits>& lhs, const concurrent_skip_list<Traits>& rhs ) {
    return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
}

template <typename Traits>
bool operator>( const concurrent_skip_list<Traits>& lhs, const concurrent_skip_list<Traits>& rhs ) {
    return rhs < lhs;
}

template <typename Traits>
bool operator<=( const concurrent_skip_list<Traits>& lhs, const concurrent_skip_list<Traits>& rhs ) {
    return !(rhs < lhs);
}

template <typename Traits>
bool operator>=( const concurrent_skip_list<Traits>& lhs, const concurrent_skip_list<Traits>& rhs ) {
    return !(lhs < rhs);
}
#endif // __TBB_CPP20_COMPARISONS_PRESENT && __TBB_CPP20_CONCEPTS_PRESENT

// Generates a number from the interval [0, MaxLevel).
1260 template <std::size_t MaxLevel> 1261 class concurrent_geometric_level_generator { 1262 public: 1263 static constexpr std::size_t max_level = MaxLevel; 1264 // TODO: modify the algorithm to accept other values of max_level 1265 static_assert(max_level == 32, "Incompatible max_level for rng"); 1266 concurrent_geometric_level_generator()1267 concurrent_geometric_level_generator() : engines(std::minstd_rand::result_type(time(nullptr))) {} 1268 operator()1269 std::size_t operator()() { 1270 // +1 is required to pass at least 1 into log2 (log2(0) is undefined) 1271 // -1 is required to have an ability to return 0 from the generator (max_level - log2(2^31) - 1) 1272 std::size_t result = max_level - std::size_t(tbb::detail::log2(engines.local()() + 1)) - 1; 1273 __TBB_ASSERT(result <= max_level, nullptr); 1274 return result; 1275 } 1276 1277 private: 1278 tbb::enumerable_thread_specific<std::minstd_rand> engines; 1279 }; 1280 1281 } // namespace d2 1282 1283 } // namespace detail 1284 } // namespace tbb 1285 1286 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) 1287 #pragma warning(pop) // warning 4127 is back 1288 #endif 1289 1290 #endif // __TBB_detail__concurrent_skip_list_H 1291