/*
    Copyright (c) 2005-2022 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_detail__segment_table_H
#define __TBB_detail__segment_table_H

#include "_config.h"
#include "_allocator_traits.h"
#include "_template_helpers.h"
#include "_utils.h"
#include "_assert.h"
#include "_exception.h"

#include <atomic>
#include <cstring>
#include <memory>
#include <type_traits>

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning(push)
#pragma warning(disable: 4127) // warning C4127: conditional expression is constant
#endif

36 namespace tbb {
37 namespace detail {
38 namespace d1 {
39 
40 template <typename T, typename Allocator, typename DerivedType, std::size_t PointersPerEmbeddedTable>
41 class segment_table {
42 public:
43     using value_type = T;
44     using segment_type = T*;
45     using atomic_segment = std::atomic<segment_type>;
46     using segment_table_type = atomic_segment*;
47 
48     using size_type = std::size_t;
49     using segment_index_type = std::size_t;
50 
51     using allocator_type = Allocator;
52 
53     using allocator_traits_type = tbb::detail::allocator_traits<allocator_type>;
54     using segment_table_allocator_type = typename allocator_traits_type::template rebind_alloc<atomic_segment>;
55 protected:
56     using segment_table_allocator_traits = tbb::detail::allocator_traits<segment_table_allocator_type>;
57     using derived_type = DerivedType;
58 
59     static constexpr size_type pointers_per_embedded_table = PointersPerEmbeddedTable;
60     static constexpr size_type pointers_per_long_table = sizeof(size_type) * 8;
61 public:
62     segment_table( const allocator_type& alloc = allocator_type() )
my_segment_table_allocator(alloc)63         : my_segment_table_allocator(alloc), my_segment_table(nullptr)
64         , my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
65     {
66         my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
67         zero_table(my_embedded_table, pointers_per_embedded_table);
68     }
69 
segment_table(const segment_table & other)70     segment_table( const segment_table& other )
71         : my_segment_table_allocator(segment_table_allocator_traits::
72                                      select_on_container_copy_construction(other.my_segment_table_allocator))
73         , my_segment_table(nullptr), my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
74     {
75         my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
76         zero_table(my_embedded_table, pointers_per_embedded_table);
77         try_call( [&] {
78             internal_transfer(other, copy_segment_body_type{*this});
79         } ).on_exception( [&] {
80             clear();
81         });
82     }
83 
segment_table(const segment_table & other,const allocator_type & alloc)84     segment_table( const segment_table& other, const allocator_type& alloc )
85         : my_segment_table_allocator(alloc), my_segment_table(nullptr)
86         , my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
87     {
88         my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
89         zero_table(my_embedded_table, pointers_per_embedded_table);
90         try_call( [&] {
91             internal_transfer(other, copy_segment_body_type{*this});
92         } ).on_exception( [&] {
93             clear();
94         });
95     }
96 
segment_table(segment_table && other)97     segment_table( segment_table&& other )
98         : my_segment_table_allocator(std::move(other.my_segment_table_allocator)), my_segment_table(nullptr)
99         , my_first_block{}, my_size{}, my_segment_table_allocation_failed{}
100     {
101         my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
102         zero_table(my_embedded_table, pointers_per_embedded_table);
103         internal_move(std::move(other));
104     }
105 
segment_table(segment_table && other,const allocator_type & alloc)106     segment_table( segment_table&& other, const allocator_type& alloc )
107         : my_segment_table_allocator(alloc), my_segment_table(nullptr), my_first_block{}
108         , my_size{}, my_segment_table_allocation_failed{}
109     {
110         my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
111         zero_table(my_embedded_table, pointers_per_embedded_table);
112         using is_equal_type = typename segment_table_allocator_traits::is_always_equal;
113         internal_move_construct_with_allocator(std::move(other), alloc, is_equal_type());
114     }
115 
~segment_table()116     ~segment_table() {
117         clear();
118     }
119 
120     segment_table& operator=( const segment_table& other ) {
121         if (this != &other) {
122             copy_assign_allocators(my_segment_table_allocator, other.my_segment_table_allocator);
123             internal_transfer(other, copy_segment_body_type{*this});
124         }
125         return *this;
126     }
127 
128     segment_table& operator=( segment_table&& other )
noexcept(derived_type::is_noexcept_assignment)129         noexcept(derived_type::is_noexcept_assignment)
130     {
131         using pocma_type = typename segment_table_allocator_traits::propagate_on_container_move_assignment;
132         using is_equal_type = typename segment_table_allocator_traits::is_always_equal;
133 
134         if (this != &other) {
135             move_assign_allocators(my_segment_table_allocator, other.my_segment_table_allocator);
136             internal_move_assign(std::move(other), tbb::detail::disjunction<is_equal_type, pocma_type>());
137         }
138         return *this;
139     }
140 
swap(segment_table & other)141     void swap( segment_table& other )
142         noexcept(derived_type::is_noexcept_swap)
143     {
144         using is_equal_type = typename segment_table_allocator_traits::is_always_equal;
145         using pocs_type = typename segment_table_allocator_traits::propagate_on_container_swap;
146 
147         if (this != &other) {
148             swap_allocators(my_segment_table_allocator, other.my_segment_table_allocator);
149             internal_swap(other, tbb::detail::disjunction<is_equal_type, pocs_type>());
150         }
151     }
152 
get_segment(segment_index_type index)153     segment_type get_segment( segment_index_type index ) const {
154         return get_table()[index] + segment_base(index);
155     }
156 
157     value_type& operator[]( size_type index ) {
158         return internal_subscript<true>(index);
159     }
160 
161     const value_type& operator[]( size_type index ) const {
162         return const_cast<segment_table*>(this)->internal_subscript<true>(index);
163     }
164 
get_allocator()165     const segment_table_allocator_type& get_allocator() const {
166         return my_segment_table_allocator;
167     }
168 
get_allocator()169     segment_table_allocator_type& get_allocator() {
170         return my_segment_table_allocator;
171     }
172 
enable_segment(segment_type & segment,segment_table_type table,segment_index_type seg_index,size_type index)173     void enable_segment( segment_type& segment, segment_table_type table, segment_index_type seg_index, size_type index ) {
174         // Allocate new segment
175         segment_type new_segment = self()->create_segment(table, seg_index, index);
176         if (new_segment != nullptr) {
177             // Store (new_segment - segment_base) into the segment table to allow access to the table by index via
178             // my_segment_table[segment_index_of(index)][index]
179             segment_type disabled_segment = nullptr;
180             if (!table[seg_index].compare_exchange_strong(disabled_segment, new_segment - segment_base(seg_index))) {
181                 // compare_exchange failed => some other thread has already enabled this segment
182                 // Deallocate the memory
183                 self()->deallocate_segment(new_segment, seg_index);
184             }
185         }
186 
187         segment = table[seg_index].load(std::memory_order_acquire);
188         __TBB_ASSERT(segment != nullptr, "If create_segment returned nullptr, the element should be stored in the table");
189     }
190 
delete_segment(segment_index_type seg_index)191     void delete_segment( segment_index_type seg_index ) {
192         segment_type segment_to_delete = self()->nullify_segment(get_table(), seg_index);
193         if (segment_to_delete == segment_allocation_failure_tag) {
194             return;
195         }
196 
197         segment_to_delete += segment_base(seg_index);
198 
199         // Deallocate the segment
200         self()->destroy_segment(segment_to_delete, seg_index);
201     }
202 
number_of_segments(segment_table_type table)203     size_type number_of_segments( segment_table_type table ) const {
204         // Check for an active table, if it is embedded table - return the number of embedded segments
205         // Otherwise - return the maximum number of segments
206         return table == my_embedded_table ? pointers_per_embedded_table : pointers_per_long_table;
207     }
208 
capacity()209     size_type capacity() const noexcept {
210         segment_table_type table = get_table();
211         size_type num_segments = number_of_segments(table);
212         for (size_type seg_index = 0; seg_index < num_segments; ++seg_index) {
213             // Check if the pointer is valid (allocated)
214             if (table[seg_index].load(std::memory_order_relaxed) <= segment_allocation_failure_tag) {
215                 return segment_base(seg_index);
216             }
217         }
218         return segment_base(num_segments);
219     }
220 
find_last_allocated_segment(segment_table_type table)221     size_type find_last_allocated_segment( segment_table_type table ) const noexcept {
222         size_type end = 0;
223         size_type num_segments = number_of_segments(table);
224         for (size_type seg_index = 0; seg_index < num_segments; ++seg_index) {
225             // Check if the pointer is valid (allocated)
226             if (table[seg_index].load(std::memory_order_relaxed) > segment_allocation_failure_tag) {
227                 end = seg_index + 1;
228             }
229         }
230         return end;
231     }
232 
reserve(size_type n)233     void reserve( size_type n ) {
234         if (n > allocator_traits_type::max_size(my_segment_table_allocator)) {
235             throw_exception(exception_id::reservation_length_error);
236         }
237 
238         size_type size = my_size.load(std::memory_order_relaxed);
239         segment_index_type start_seg_idx = size == 0 ? 0 : segment_index_of(size - 1) + 1;
240         for (segment_index_type seg_idx = start_seg_idx; segment_base(seg_idx) < n; ++seg_idx) {
241                 size_type first_index = segment_base(seg_idx);
242                 internal_subscript<true>(first_index);
243         }
244     }
245 
clear()246     void clear() {
247         clear_segments();
248         clear_table();
249         my_size.store(0, std::memory_order_relaxed);
250         my_first_block.store(0, std::memory_order_relaxed);
251     }
252 
clear_segments()253     void clear_segments() {
254         segment_table_type current_segment_table = get_table();
255         for (size_type i = number_of_segments(current_segment_table); i != 0; --i) {
256             if (current_segment_table[i - 1].load(std::memory_order_relaxed) != nullptr) {
257                 // If the segment was enabled - disable and deallocate it
258                 delete_segment(i - 1);
259             }
260         }
261     }
262 
clear_table()263     void clear_table() {
264         segment_table_type current_segment_table = get_table();
265         if (current_segment_table != my_embedded_table) {
266             // If the active table is not the embedded one - deallocate the active table
267             for (size_type i = 0; i != pointers_per_long_table; ++i) {
268                 segment_table_allocator_traits::destroy(my_segment_table_allocator, &current_segment_table[i]);
269             }
270 
271             segment_table_allocator_traits::deallocate(my_segment_table_allocator, current_segment_table, pointers_per_long_table);
272             my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
273             zero_table(my_embedded_table, pointers_per_embedded_table);
274         }
275     }
276 
extend_table_if_necessary(segment_table_type & table,size_type start_index,size_type end_index)277     void extend_table_if_necessary(segment_table_type& table, size_type start_index, size_type end_index) {
278         // extend_segment_table if an active table is an embedded table
279         // and the requested index is not in the embedded table
280         if (table == my_embedded_table && end_index > embedded_table_size) {
281             if (start_index <= embedded_table_size) {
282                 try_call([&] {
283                     table = self()->allocate_long_table(my_embedded_table, start_index);
284                     // It is possible that the table was extended by the thread that allocated first_block.
285                     // In this case it is necessary to re-read the current table.
286 
287                     if (table) {
288                         my_segment_table.store(table, std::memory_order_release);
289                     } else {
290                         table = my_segment_table.load(std::memory_order_acquire);
291                     }
292                 }).on_exception([&] {
293                     my_segment_table_allocation_failed.store(true, std::memory_order_relaxed);
294                 });
295             } else {
296                 atomic_backoff backoff;
297                 do {
298                     if (my_segment_table_allocation_failed.load(std::memory_order_relaxed)) {
299                         throw_exception(exception_id::bad_alloc);
300                     }
301                     backoff.pause();
302                     table = my_segment_table.load(std::memory_order_acquire);
303                 } while (table == my_embedded_table);
304             }
305         }
306     }
307 
308     // Return the segment where index is stored
segment_index_of(size_type index)309     static constexpr segment_index_type segment_index_of( size_type index ) {
310         return size_type(tbb::detail::log2(uintptr_t(index|1)));
311     }
312 
313     // Needed to calculate the offset in segment
segment_base(size_type index)314     static constexpr size_type segment_base( size_type index ) {
315         return size_type(1) << index & ~size_type(1);
316     }
317 
318     // Return size of the segment
segment_size(size_type index)319     static constexpr size_type segment_size( size_type index ) {
320         return index == 0 ? 2 : size_type(1) << index;
321     }
322 
323 private:
324 
self()325     derived_type* self() {
326         return static_cast<derived_type*>(this);
327     }
328 
329     struct copy_segment_body_type {
operatorcopy_segment_body_type330         void operator()( segment_index_type index, segment_type from, segment_type to ) const {
331             my_instance.self()->copy_segment(index, from, to);
332         }
333         segment_table& my_instance;
334     };
335 
336     struct move_segment_body_type {
operatormove_segment_body_type337         void operator()( segment_index_type index, segment_type from, segment_type to ) const {
338             my_instance.self()->move_segment(index, from, to);
339         }
340         segment_table& my_instance;
341     };
342 
343     // Transgers all segments from the other table
344     template <typename TransferBody>
internal_transfer(const segment_table & other,TransferBody transfer_segment)345     void internal_transfer( const segment_table& other, TransferBody transfer_segment ) {
346         static_cast<derived_type*>(this)->destroy_elements();
347 
348         assign_first_block_if_necessary(other.my_first_block.load(std::memory_order_relaxed));
349         my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
350 
351         segment_table_type other_table = other.get_table();
352         size_type end_segment_size = segment_size(other.find_last_allocated_segment(other_table));
353 
354         // If an exception occurred in other, then the size may be greater than the size of the end segment.
355         size_type other_size = end_segment_size < other.my_size.load(std::memory_order_relaxed) ?
356             other.my_size.load(std::memory_order_relaxed) : end_segment_size;
357         other_size = my_segment_table_allocation_failed ? embedded_table_size : other_size;
358 
359         for (segment_index_type i = 0; segment_base(i) < other_size; ++i) {
360             // If the segment in other table is enabled - transfer it
361             if (other_table[i].load(std::memory_order_relaxed) == segment_allocation_failure_tag)
362             {
363                     my_size = segment_base(i);
364                     break;
365             } else if (other_table[i].load(std::memory_order_relaxed) != nullptr) {
366                 internal_subscript<true>(segment_base(i));
367                 transfer_segment(i, other.get_table()[i].load(std::memory_order_relaxed) + segment_base(i),
368                                 get_table()[i].load(std::memory_order_relaxed) + segment_base(i));
369             }
370         }
371     }
372 
373     // Moves the other segment table
374     // Only equal allocators are allowed
internal_move(segment_table && other)375     void internal_move( segment_table&& other ) {
376         // NOTE: allocators should be equal
377         clear();
378         my_first_block.store(other.my_first_block.load(std::memory_order_relaxed), std::memory_order_relaxed);
379         my_size.store(other.my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
380         // If an active table in other is embedded - restore all of the embedded segments
381         if (other.get_table() == other.my_embedded_table) {
382             for ( size_type i = 0; i != pointers_per_embedded_table; ++i ) {
383                 segment_type other_segment = other.my_embedded_table[i].load(std::memory_order_relaxed);
384                 my_embedded_table[i].store(other_segment, std::memory_order_relaxed);
385                 other.my_embedded_table[i].store(nullptr, std::memory_order_relaxed);
386             }
387             my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
388         } else {
389             my_segment_table.store(other.my_segment_table, std::memory_order_relaxed);
390             other.my_segment_table.store(other.my_embedded_table, std::memory_order_relaxed);
391             zero_table(other.my_embedded_table, pointers_per_embedded_table);
392         }
393         other.my_size.store(0, std::memory_order_relaxed);
394     }
395 
396     // Move construct the segment table with the allocator object
397     // if any instances of allocator_type are always equal
internal_move_construct_with_allocator(segment_table && other,const allocator_type &,std::true_type)398     void internal_move_construct_with_allocator( segment_table&& other, const allocator_type&,
399                                                  /*is_always_equal = */ std::true_type ) {
400         internal_move(std::move(other));
401     }
402 
403     // Move construct the segment table with the allocator object
404     // if any instances of allocator_type are always equal
internal_move_construct_with_allocator(segment_table && other,const allocator_type & alloc,std::false_type)405     void internal_move_construct_with_allocator( segment_table&& other, const allocator_type& alloc,
406                                                  /*is_always_equal = */ std::false_type ) {
407         if (other.my_segment_table_allocator == alloc) {
408             // If allocators are equal - restore pointers
409             internal_move(std::move(other));
410         } else {
411             // If allocators are not equal - perform per element move with reallocation
412             try_call( [&] {
413                 internal_transfer(other, move_segment_body_type{*this});
414             } ).on_exception( [&] {
415                 clear();
416             });
417         }
418     }
419 
420     // Move assigns the segment table to other is any instances of allocator_type are always equal
421     // or propagate_on_container_move_assignment is true
internal_move_assign(segment_table && other,std::true_type)422     void internal_move_assign( segment_table&& other, /*is_always_equal || POCMA = */ std::true_type ) {
423         internal_move(std::move(other));
424     }
425 
426     // Move assigns the segment table to other is any instances of allocator_type are not always equal
427     // and propagate_on_container_move_assignment is false
internal_move_assign(segment_table && other,std::false_type)428     void internal_move_assign( segment_table&& other, /*is_always_equal || POCMA = */ std::false_type ) {
429         if (my_segment_table_allocator == other.my_segment_table_allocator) {
430             // If allocators are equal - restore pointers
431             internal_move(std::move(other));
432         } else {
433             // If allocators are not equal - perform per element move with reallocation
434             internal_transfer(other, move_segment_body_type{*this});
435         }
436     }
437 
438     // Swaps two segment tables if any instances of allocator_type are always equal
439     // or propagate_on_container_swap is true
internal_swap(segment_table & other,std::true_type)440     void internal_swap( segment_table& other, /*is_always_equal || POCS = */ std::true_type ) {
441         internal_swap_fields(other);
442     }
443 
444     // Swaps two segment tables if any instances of allocator_type are not always equal
445     // and propagate_on_container_swap is false
446     // According to the C++ standard, swapping of two containers with unequal allocators
447     // is an undefined behavior scenario
internal_swap(segment_table & other,std::false_type)448     void internal_swap( segment_table& other, /*is_always_equal || POCS = */ std::false_type ) {
449         __TBB_ASSERT(my_segment_table_allocator == other.my_segment_table_allocator,
450                      "Swapping with unequal allocators is not allowed");
451         internal_swap_fields(other);
452     }
453 
internal_swap_fields(segment_table & other)454     void internal_swap_fields( segment_table& other ) {
455         // If an active table in either *this segment table or other is an embedded one - swaps the embedded tables
456         if (get_table() == my_embedded_table ||
457             other.get_table() == other.my_embedded_table) {
458 
459             for (size_type i = 0; i != pointers_per_embedded_table; ++i) {
460                 segment_type current_segment = my_embedded_table[i].load(std::memory_order_relaxed);
461                 segment_type other_segment = other.my_embedded_table[i].load(std::memory_order_relaxed);
462 
463                 my_embedded_table[i].store(other_segment, std::memory_order_relaxed);
464                 other.my_embedded_table[i].store(current_segment, std::memory_order_relaxed);
465             }
466         }
467 
468         segment_table_type current_segment_table = get_table();
469         segment_table_type other_segment_table = other.get_table();
470 
471         // If an active table is an embedded one -
472         // store an active table in other to the embedded one from other
473         if (current_segment_table == my_embedded_table) {
474             other.my_segment_table.store(other.my_embedded_table, std::memory_order_relaxed);
475         } else {
476             // Otherwise - store it to the active segment table
477             other.my_segment_table.store(current_segment_table, std::memory_order_relaxed);
478         }
479 
480         // If an active table in other segment table is an embedded one -
481         // store an active table in other to the embedded one from *this
482         if (other_segment_table == other.my_embedded_table) {
483             my_segment_table.store(my_embedded_table, std::memory_order_relaxed);
484         } else {
485             // Otherwise - store it to the active segment table in other
486             my_segment_table.store(other_segment_table, std::memory_order_relaxed);
487         }
488         auto first_block = other.my_first_block.load(std::memory_order_relaxed);
489         other.my_first_block.store(my_first_block.load(std::memory_order_relaxed), std::memory_order_relaxed);
490         my_first_block.store(first_block, std::memory_order_relaxed);
491 
492         auto size = other.my_size.load(std::memory_order_relaxed);
493         other.my_size.store(my_size.load(std::memory_order_relaxed), std::memory_order_relaxed);
494         my_size.store(size, std::memory_order_relaxed);
495     }
496 
497 protected:
498     // A flag indicates that an exception was throws during segment allocations
499     const segment_type segment_allocation_failure_tag = reinterpret_cast<segment_type>(1);
500     static constexpr size_type embedded_table_size = segment_size(pointers_per_embedded_table);
501 
502     template <bool allow_out_of_range_access>
internal_subscript(size_type index)503     value_type& internal_subscript( size_type index ) {
504         segment_index_type seg_index = segment_index_of(index);
505         segment_table_type table = my_segment_table.load(std::memory_order_acquire);
506         segment_type segment = nullptr;
507 
508         if (allow_out_of_range_access) {
509             if (derived_type::allow_table_extending) {
510                 extend_table_if_necessary(table, index, index + 1);
511             }
512 
513             segment = table[seg_index].load(std::memory_order_acquire);
514             // If the required segment is disabled - enable it
515             if (segment == nullptr) {
516                 enable_segment(segment, table, seg_index, index);
517             }
518             // Check if an exception was thrown during segment allocation
519             if (segment == segment_allocation_failure_tag) {
520                 throw_exception(exception_id::bad_alloc);
521             }
522         } else {
523             segment = table[seg_index].load(std::memory_order_acquire);
524         }
525         __TBB_ASSERT(segment != nullptr, nullptr);
526 
527         return segment[index];
528     }
529 
assign_first_block_if_necessary(segment_index_type index)530     void assign_first_block_if_necessary(segment_index_type index) {
531         size_type zero = 0;
532         if (this->my_first_block.load(std::memory_order_relaxed) == zero) {
533             this->my_first_block.compare_exchange_strong(zero, index);
534         }
535     }
536 
zero_table(segment_table_type table,size_type count)537     void zero_table( segment_table_type table, size_type count ) {
538         for (size_type i = 0; i != count; ++i) {
539             table[i].store(nullptr, std::memory_order_relaxed);
540         }
541     }
542 
get_table()543     segment_table_type get_table() const {
544         return my_segment_table.load(std::memory_order_acquire);
545     }
546 
547     segment_table_allocator_type my_segment_table_allocator;
548     std::atomic<segment_table_type> my_segment_table;
549     atomic_segment my_embedded_table[pointers_per_embedded_table];
550     // Number of segments in first block
551     std::atomic<size_type> my_first_block;
552     // Number of elements in table
553     std::atomic<size_type> my_size;
554     // Flag to indicate failed extend table
555     std::atomic<bool> my_segment_table_allocation_failed;
556 }; // class segment_table
557 
558 } // namespace d1
559 } // namespace detail
560 } // namespace tbb
561 
562 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
563 #pragma warning(pop) // warning 4127 is back
564 #endif
565 
566 #endif // __TBB_detail__segment_table_H
567