/*
    Copyright (c) 2005-2023 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_detail__utils_H
#define __TBB_detail__utils_H

#include <type_traits>
#include <cstdint>
#include <atomic>
#include <functional>

#include "_config.h"
#include "_assert.h"
#include "_machine.h"

namespace tbb {
namespace detail {
inline namespace d0 {

//! Utility template function to prevent "unused" warnings by various compilers.
template<typename... T> void suppress_unused_warning(T&&...) {}

//! Compile-time constant that is an upper bound on cache line/sector size.
/** It should be used only in situations where having a compile-time upper
    bound is more useful than a run-time exact answer.
    @ingroup memory_allocation */
constexpr std::size_t max_nfs_size = 128;
constexpr std::size_t max_nfs_size_exp = 7;
static_assert(1 << max_nfs_size_exp == max_nfs_size, "max_nfs_size_exp must be log2(max_nfs_size)");

//! Class that implements exponential backoff.
class atomic_backoff {
    //! Time delay, in units of "pause" instructions.
    /** Should be equal to approximately the number of "pause" instructions
        that take the same time as a context switch. Must be a power of two. */
    static constexpr std::int32_t LOOPS_BEFORE_YIELD = 16;
    std::int32_t count;

public:
    // In many cases, an object of this type is initialized eagerly on a hot path,
    // as in for(atomic_backoff b; ; b.pause()) { /*loop body*/ }
    // For this reason, the construction cost must be very small!
    atomic_backoff() : count(1) {}
    // This constructor pauses immediately; do not use on hot paths!
    atomic_backoff(bool) : count(1) { pause(); }

    //! No Copy
    atomic_backoff(const atomic_backoff&) = delete;
    atomic_backoff& operator=(const atomic_backoff&) = delete;

    //! Pause for a while.
    void pause() {
        if (count <= LOOPS_BEFORE_YIELD) {
            machine_pause(count);
            // Pause twice as long the next time.
            count *= 2;
        } else {
            // Pause is so long that we might as well yield the CPU to the scheduler.
            yield();
        }
    }

    //! Pause again; return false if the backoff has saturated.
    bool bounded_pause() {
        machine_pause(count);
        if (count < LOOPS_BEFORE_YIELD) {
            // Pause twice as long the next time.
            count *= 2;
            return true;
        } else {
            return false;
        }
    }

    void reset() {
        count = 1;
    }
};
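
// Illustrative usage sketch (not part of the library interface): a thread spinning on a
// hypothetical flag with exponential backoff; `ready` is assumed to be set by another thread.
//
//     std::atomic<bool> ready{false};
//     for (atomic_backoff backoff; !ready.load(std::memory_order_acquire); ) {
//         backoff.pause();   // doubles the pause up to LOOPS_BEFORE_YIELD, then yields
//     }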

//! Spin WHILE the condition is true.
/** C should be a predicate callable with a value of type T and returning a value
    convertible to bool. */
template <typename T, typename C>
T spin_wait_while(const std::atomic<T>& location, C comp, std::memory_order order) {
    atomic_backoff backoff;
    T snapshot = location.load(order);
    while (comp(snapshot)) {
        backoff.pause();
        snapshot = location.load(order);
    }
    return snapshot;
}

//! Spin WHILE the value of the variable is equal to a given value.
/** T and U should be comparable types. */
template <typename T, typename U>
T spin_wait_while_eq(const std::atomic<T>& location, const U value, std::memory_order order = std::memory_order_acquire) {
    return spin_wait_while(location, [&value](T t) { return t == value; }, order);
}

//! Spin UNTIL the value of the variable is equal to a given value.
/** T and U should be comparable types. */
template <typename T, typename U>
T spin_wait_until_eq(const std::atomic<T>& location, const U value, std::memory_order order = std::memory_order_acquire) {
    return spin_wait_while(location, [&value](T t) { return t != value; }, order);
}
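
// Illustrative usage sketch (names are hypothetical): waiting until another thread
// publishes a state value, then using the final snapshot that ended the spin.
//
//     std::atomic<int> stage{0};
//     // ... another thread eventually does: stage.store(2, std::memory_order_release);
//     int observed = spin_wait_until_eq(stage, 2);   // spins with atomic_backoff until stage == 2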

//! Spin UNTIL the condition returns true or the spinning time is up.
/** Returns what the passed functor returned the last time it was invoked. */
template <typename Condition>
bool timed_spin_wait_until(Condition condition) {
    // 32 pauses + 32 yields were measured to be a balanced spin time before sleeping.
    bool finish = condition();
    for (int i = 1; !finish && i < 32; finish = condition(), i *= 2) {
        machine_pause(i);
    }
    for (int i = 32; !finish && i < 64; finish = condition(), ++i) {
        yield();
    }
    return finish;
}
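
// Illustrative usage sketch (hypothetical names): spin briefly on a condition and fall
// back to a heavier waiting mechanism if the bounded spin did not succeed.
//
//     std::atomic<bool> done{false};
//     if (!timed_spin_wait_until([&] { return done.load(std::memory_order_acquire); })) {
//         // bounded spinning expired; block on a mutex/condition variable instead
//     }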

template <typename T>
T clamp(T value, T lower_bound, T upper_bound) {
    __TBB_ASSERT(lower_bound <= upper_bound, "Incorrect bounds");
    return value > lower_bound ? (value > upper_bound ? upper_bound : value) : lower_bound;
}

template <typename T>
std::uintptr_t log2(T in) {
    __TBB_ASSERT(in > 0, "The logarithm of a non-positive value is undefined.");
    return machine_log2(in);
}

template <typename T>
T reverse_bits(T src) {
    return machine_reverse_bits(src);
}

template <typename T>
T reverse_n_bits(T src, std::size_t n) {
    __TBB_ASSERT(n != 0, "Reverse for 0 bits is undefined behavior.");
    return reverse_bits(src) >> (number_of_bits<T>() - n);
}
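
// Worked example (illustrative, assuming number_of_bits<T>() is the bit width of T):
// for an 8-bit type, reverse_n_bits(0b00000110, 3) first reverses all 8 bits
// (0b01100000) and then shifts right by 8 - 3 = 5, yielding 0b011, i.e. the lowest
// 3 bits of the source in reverse order.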

// A function to check if the passed integer is a power of two.
template <typename IntegerType>
constexpr bool is_power_of_two( IntegerType arg ) {
    static_assert(std::is_integral<IntegerType>::value,
                  "An argument for is_power_of_two should be of an integral type");
    return arg && (0 == (arg & (arg - 1)));
}

// A function to determine if the passed integer is a power of two
// at least as big as another power of two, i.e. for strictly positive i and j,
// with j being a power of two, determines whether i == j<<k for some nonnegative k.
template <typename ArgIntegerType, typename DivisorIntegerType>
constexpr bool is_power_of_two_at_least(ArgIntegerType arg, DivisorIntegerType divisor) {
    // Divisor should be a power of two
    static_assert(std::is_integral<ArgIntegerType>::value,
                  "An argument for is_power_of_two_at_least should be of an integral type");
    return 0 == (arg & (arg - divisor));
}
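
// Worked example (illustrative): is_power_of_two_at_least(16, 4) evaluates
// 16 & (16 - 4) = 16 & 12 = 0, so it is true (16 == 4 << 2), whereas
// is_power_of_two_at_least(24, 4) evaluates 24 & 20 = 16 != 0, so it is false.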

// A function to compute arg modulo divisor where divisor is a power of 2.
template <typename ArgIntegerType, typename DivisorIntegerType>
inline ArgIntegerType modulo_power_of_two(ArgIntegerType arg, DivisorIntegerType divisor) {
    __TBB_ASSERT( is_power_of_two(divisor), "Divisor should be a power of two" );
    return arg & (divisor - 1);
}
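
// Worked example (illustrative): modulo_power_of_two(37, 8) computes 37 & 7 = 5,
// which equals 37 % 8 without a division instruction.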

//! A function to check if a passed-in pointer is aligned on a specific border.
template <typename T>
constexpr bool is_aligned(T* pointer, std::uintptr_t alignment) {
    return 0 == (reinterpret_cast<std::uintptr_t>(pointer) & (alignment - 1));
}

#if TBB_USE_ASSERT
static void* const poisoned_ptr = reinterpret_cast<void*>(-1);

//! Set p to an invalid pointer value.
template <typename T>
inline void poison_pointer( T* &p ) { p = reinterpret_cast<T*>(poisoned_ptr); }

template <typename T>
inline void poison_pointer(std::atomic<T*>& p) { p.store(reinterpret_cast<T*>(poisoned_ptr), std::memory_order_relaxed); }

/** Expected to be used in assertions only, thus no empty form is defined. **/
template <typename T>
inline bool is_poisoned( T* p ) { return p == reinterpret_cast<T*>(poisoned_ptr); }

template <typename T>
inline bool is_poisoned(const std::atomic<T*>& p) { return is_poisoned(p.load(std::memory_order_relaxed)); }
#else
template <typename T>
inline void poison_pointer(T&) { /* do nothing */ }
#endif /* !TBB_USE_ASSERT */

template <std::size_t alignment = 0, typename T>
bool assert_pointer_valid(T* p, const char* comment = nullptr) {
    suppress_unused_warning(p, comment);
    __TBB_ASSERT(p != nullptr, comment);
    __TBB_ASSERT(!is_poisoned(p), comment);
#if !(_MSC_VER && _MSC_VER <= 1900 && !__INTEL_COMPILER)
    __TBB_ASSERT(is_aligned(p, alignment == 0 ? alignof(T) : alignment), comment);
#endif
    // Returns something to simplify the assert_pointers_valid implementation.
    return true;
}

template <typename... Args>
void assert_pointers_valid(Args*... p) {
    // suppress_unused_warning is used as an evaluation context for the variadic pack.
    suppress_unused_warning(assert_pointer_valid(p)...);
}
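
// Illustrative usage sketch (hypothetical names): validating several pointers at once in
// debug builds; each pointer is checked for null, poisoning, and alignment.
//
//     void link(node* prev, node* next) {
//         assert_pointers_valid(prev, next);
//         // ... use prev and next ...
//     }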

//! Base class for types that should not be assigned.
class no_assign {
public:
    void operator=(const no_assign&) = delete;
    no_assign(const no_assign&) = default;
    no_assign() = default;
};

//! Base class for types that should not be copied or assigned.
class no_copy: no_assign {
public:
    no_copy(const no_copy&) = delete;
    no_copy() = default;
};

template <typename T>
void swap_atomics_relaxed(std::atomic<T>& lhs, std::atomic<T>& rhs) {
    T tmp = lhs.load(std::memory_order_relaxed);
    lhs.store(rhs.load(std::memory_order_relaxed), std::memory_order_relaxed);
    rhs.store(tmp, std::memory_order_relaxed);
}
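
// Note (added for clarity): the swap above is performed as three relaxed operations, so it
// is not atomic as a whole; it is presumably intended for contexts where the two atomics
// are not being accessed concurrently (e.g. inside container move/swap operations).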

//! One-time initialization states
enum class do_once_state {
    uninitialized = 0,      ///< No execution attempts have been undertaken yet
    pending,                ///< A thread is executing the associated do-once routine
    executed,               ///< The do-once routine has been executed
    initialized = executed  ///< Convenience alias
};

//! One-time initialization function
/** \param initializer Pointer to a function without arguments.
    The variant that returns bool is used for cases when initialization can fail
    and it is OK to continue execution, but the state should be reset so that
    the initialization attempt is repeated the next time.
    \param state Shared state associated with the initializer that specifies its
    initialization state. Must be initially set to the #uninitialized value
    (e.g. by means of default static zero initialization). **/
template <typename F>
void atomic_do_once( const F& initializer, std::atomic<do_once_state>& state ) {
    // The loop in the implementation is necessary to avoid a race when a thread T2
    // that arrived in the middle of an initialization attempt by another thread T1
    // has just made initialization possible.
    // In such a case T2 has to rely on T1 to initialize, but T1 may already be past
    // the point where it can recognize the changed conditions.
    do_once_state expected_state;
    while ( state.load( std::memory_order_acquire ) != do_once_state::executed ) {
        if( state.load( std::memory_order_relaxed ) == do_once_state::uninitialized ) {
            expected_state = do_once_state::uninitialized;
#if defined(__INTEL_COMPILER) && __INTEL_COMPILER <= 1910
            using enum_type = typename std::underlying_type<do_once_state>::type;
            if( ((std::atomic<enum_type>&)state).compare_exchange_strong( (enum_type&)expected_state, (enum_type)do_once_state::pending ) ) {
#else
            if( state.compare_exchange_strong( expected_state, do_once_state::pending ) ) {
#endif
                run_initializer( initializer, state );
                break;
            }
        }
        spin_wait_while_eq( state, do_once_state::pending );
    }
}

// Run the initializer, which cannot fail.
template <typename Functor>
void run_initializer(const Functor& f, std::atomic<do_once_state>& state ) {
    f();
    state.store(do_once_state::executed, std::memory_order_release);
}
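
// Illustrative usage sketch (hypothetical names): lazily initializing shared data exactly
// once from many threads; `init_state` must be zero-initialized.
//
//     static std::atomic<do_once_state> init_state;   // starts as do_once_state::uninitialized
//     void ensure_initialized() {
//         atomic_do_once([] { /* set up shared data */ }, init_state);
//     }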

#if __TBB_CPP20_CONCEPTS_PRESENT
template <typename T>
concept boolean_testable_impl = std::convertible_to<T, bool>;

template <typename T>
concept boolean_testable = boolean_testable_impl<T> && requires( T&& t ) {
    { !std::forward<T>(t) } -> boolean_testable_impl;
};

#if __TBB_CPP20_COMPARISONS_PRESENT
struct synthesized_three_way_comparator {
    template <typename T1, typename T2>
    auto operator()( const T1& lhs, const T2& rhs ) const
        requires requires {
            { lhs < rhs } -> boolean_testable;
            { rhs < lhs } -> boolean_testable;
        }
    {
        if constexpr (std::three_way_comparable_with<T1, T2>) {
            return lhs <=> rhs;
        } else {
            if (lhs < rhs) {
                return std::weak_ordering::less;
            }
            if (rhs < lhs) {
                return std::weak_ordering::greater;
            }
            return std::weak_ordering::equivalent;
        }
    }
}; // struct synthesized_three_way_comparator

template <typename T1, typename T2 = T1>
using synthesized_three_way_result = decltype(synthesized_three_way_comparator{}(std::declval<T1&>(),
                                                                                  std::declval<T2&>()));
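
// Illustrative note: for a hypothetical type that defines only operator<, the comparator
// falls back to two '<' comparisons and synthesized_three_way_result<T> is
// std::weak_ordering; for types that are three_way_comparable, the result type is
// whatever operator<=> returns.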

#endif // __TBB_CPP20_COMPARISONS_PRESENT

// Check if the type T is implicitly OR explicitly convertible to U.
template <typename T, typename U>
concept relaxed_convertible_to = std::constructible_from<U, T>;

template <typename T, typename U>
concept adaptive_same_as =
#if __TBB_STRICT_CONSTRAINTS
    std::same_as<T, U>;
#else
    std::convertible_to<T, U>;
#endif
#endif // __TBB_CPP20_CONCEPTS_PRESENT

template <typename F, typename... Args>
auto invoke(F&& f, Args&&... args)
#if __TBB_CPP17_INVOKE_PRESENT
    noexcept(std::is_nothrow_invocable_v<F, Args...>)
    -> std::invoke_result_t<F, Args...>
{
    return std::invoke(std::forward<F>(f), std::forward<Args>(args)...);
}
#else // __TBB_CPP17_INVOKE_PRESENT
    noexcept(noexcept(std::forward<F>(f)(std::forward<Args>(args)...)))
    -> decltype(std::forward<F>(f)(std::forward<Args>(args)...))
{
    return std::forward<F>(f)(std::forward<Args>(args)...);
}
#endif // __TBB_CPP17_INVOKE_PRESENT
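
// Illustrative note: when the C++17 std::invoke feature is available this forwards to
// std::invoke (so pointers to members also work); otherwise only plain callables are
// supported, e.g. invoke([](int x) { return x + 1; }, 41) returns 42 in either case.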

} // namespace d0

namespace d1 {

class delegate_base {
public:
    virtual bool operator()() const = 0;
    virtual ~delegate_base() {}
};

template <typename FuncType>
class delegated_function : public delegate_base {
public:
    delegated_function(FuncType& f) : my_func(f) {}

    bool operator()() const override {
        return my_func();
    }

private:
    FuncType& my_func;
};
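
// Illustrative usage sketch (hypothetical names): wrapping a lambda so it can be passed
// across a virtual-call boundary (e.g. into precompiled library code) as a delegate_base
// reference.
//
//     auto pred = [] { return try_acquire_resource(); };   // hypothetical callable
//     delegated_function<decltype(pred)> delegate(pred);
//     const delegate_base& erased = delegate;              // callee sees only delegate_base
//     bool ok = erased();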

} // namespace d1

} // namespace detail
} // namespace tbb

#endif // __TBB_detail__utils_H