/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_malloc_Synchronize_H_
#define __TBB_malloc_Synchronize_H_

#include "oneapi/tbb/detail/_utils.h"

#include <atomic>

//! Stripped-down version of spin_mutex.
/** Instances of MallocMutex must be declared in memory that is zero-initialized.
    There are no constructors. This is deliberate: it lets the mutex be used in
    situations where it might be needed while file-scope constructors are still
    running.

    There are no "acquire" or "release" methods. The scoped_lock must be used
    in a strict block-scoped locking pattern; omitting these methods permits
    further simplification. */
class MallocMutex : tbb::detail::no_copy {
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;

    void lock() {
        tbb::detail::atomic_backoff backoff;
        while (m_flag.test_and_set()) backoff.pause();
    }
    bool try_lock() {
        return !m_flag.test_and_set();
    }
    void unlock() {
        m_flag.clear(std::memory_order_release);
    }

public:
    class scoped_lock : tbb::detail::no_copy {
        MallocMutex& m_mutex;
        bool m_taken;

    public:
        scoped_lock(MallocMutex& m) : m_mutex(m), m_taken(true) {
            m.lock();
        }
        scoped_lock(MallocMutex& m, bool block, bool *locked) : m_mutex(m), m_taken(false) {
            if (block) {
                m.lock();
                m_taken = true;
            } else {
                m_taken = m.try_lock();
            }
            if (locked) *locked = m_taken;
        }
        ~scoped_lock() {
            if (m_taken) {
                m_mutex.unlock();
            }
        }
    };
    friend class scoped_lock;
};

//! Spin (with exponential backoff) while the value at 'location' equals 'value'.
inline void SpinWaitWhileEq(const std::atomic<intptr_t>& location, const intptr_t value) {
    tbb::detail::spin_wait_while_eq(location, value);
}

//! Spin (with exponential backoff) until the value at 'location' equals 'value'.
inline void SpinWaitUntilEq(const std::atomic<intptr_t>& location, const intptr_t value) {
    tbb::detail::spin_wait_until_eq(location, value);
}

//! Thin wrapper over tbb::detail::atomic_backoff for use within the allocator.
class AtomicBackoff {
    tbb::detail::atomic_backoff backoff;
public:
    AtomicBackoff() {}
    void pause() { backoff.pause(); }
};

#endif /* __TBB_malloc_Synchronize_H_ */
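
// Usage sketch: a minimal illustration of the block-scoped locking pattern and
// the spin-wait helper declared above. It assumes this header and the oneTBB
// detail headers are on the include path; the names g_exampleMutex,
// exampleWork, exampleTryWork, and exampleSpin are hypothetical, and the block
// is guarded by '#if 0' so it is never compiled.
#if 0
static MallocMutex g_exampleMutex; // zero-initialized static storage; no constructor runs

void exampleWork() {
    // Blocking form: the constructor spins until the mutex is acquired and the
    // destructor releases it when 'lock' leaves scope.
    MallocMutex::scoped_lock lock(g_exampleMutex);
    // ... protected work ...
}

bool exampleTryWork() {
    bool locked;
    // Non-blocking form: passing block = false attempts try_lock(); 'locked'
    // reports whether the mutex was actually acquired.
    MallocMutex::scoped_lock lock(g_exampleMutex, /*block=*/false, &locked);
    if (!locked) return false;
    // ... protected work ...
    return true; // the destructor releases the mutex here
}

void exampleSpin(const std::atomic<intptr_t>& state) {
    // Wait (by spinning with backoff) while 'state' still holds 0.
    SpinWaitWhileEq(state, intptr_t(0));
}
#endif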