/*
    Copyright (c) 2005-2022 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_queuing_mutex_H
#define __TBB_queuing_mutex_H

#include "detail/_namespace_injection.h"
#include "detail/_assert.h"
#include "detail/_utils.h"
#include "detail/_mutex_common.h"

#include "profiling.h"

#include <atomic>

namespace tbb {
namespace detail {
namespace d1 {

//! Queuing mutex with local-only spinning.
/** @ingroup synchronization */
class queuing_mutex {
public:
    //! Construct unacquired mutex.
    queuing_mutex() noexcept {
        create_itt_sync(this, "tbb::queuing_mutex", "");
    }

    queuing_mutex(const queuing_mutex&) = delete;
    queuing_mutex& operator=(const queuing_mutex&) = delete;
    //! The scoped locking pattern
    /** It helps to avoid the common problem of forgetting to release the lock.
        It also nicely provides the "node" for queuing locks. */
    class scoped_lock {
        //! Reset fields to mean "no lock held".
        void reset() {
            m_mutex = nullptr;
        }

    public:
        //! Construct lock that has not acquired a mutex.
        /** Equivalent to zero-initialization of *this. */
        scoped_lock() = default;

        //! Acquire lock on given mutex.
        scoped_lock(queuing_mutex& m) {
            acquire(m);
        }

        //! Release lock (if lock is held).
        ~scoped_lock() {
            if (m_mutex) release();
        }

        //! No Copy
        scoped_lock( const scoped_lock& ) = delete;
        scoped_lock& operator=( const scoped_lock& ) = delete;

        //! Acquire lock on given mutex.
        void acquire( queuing_mutex& m ) {
            __TBB_ASSERT(!m_mutex, "scoped_lock is already holding a mutex");

            // Must set all fields before the exchange, because once the
            // exchange executes, *this becomes accessible to other threads.
            m_mutex = &m;
            m_next.store(nullptr, std::memory_order_relaxed);
            m_going.store(0U, std::memory_order_relaxed);

            // x86 compare exchange operation always has a strong fence
            // "sending" the fields initialized above to other processors.
            scoped_lock* pred = m.q_tail.exchange(this);
            if (pred) {
                call_itt_notify(prepare, &m);
                __TBB_ASSERT(pred->m_next.load(std::memory_order_relaxed) == nullptr, "the predecessor has another successor!");

                pred->m_next.store(this, std::memory_order_release);
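                // Local spin: each waiter spins on its own m_going flag, so
                // waiting threads do not contend on a shared cache line.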
                spin_wait_while_eq(m_going, 0U);
            }
            call_itt_notify(acquired, &m);
        }

        //! Acquire lock on given mutex if free (i.e. non-blocking)
        bool try_acquire( queuing_mutex& m ) {
            __TBB_ASSERT(!m_mutex, "scoped_lock is already holding a mutex");

            // Must set all fields before the compare_exchange_strong, because once the
            // compare_exchange_strong executes, *this becomes accessible to other threads.
            m_next.store(nullptr, std::memory_order_relaxed);
            m_going.store(0U, std::memory_order_relaxed);

            scoped_lock* expected = nullptr;
            // The compare_exchange_strong must have release semantics, because we are
            // "sending" the fields initialized above to other processors.
            // x86 compare exchange operation always has a strong fence
            if (!m.q_tail.compare_exchange_strong(expected, this, std::memory_order_acq_rel))
                return false;

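            // The queue was empty and *this is now both head and tail: lock acquired.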
            m_mutex = &m;

            call_itt_notify(acquired, &m);
            return true;
        }

        //! Release lock.
        void release()
        {
            __TBB_ASSERT(this->m_mutex, "no lock acquired");

            call_itt_notify(releasing, this->m_mutex);

            if (m_next.load(std::memory_order_relaxed) == nullptr) {
                scoped_lock* expected = this;
                if (m_mutex->q_tail.compare_exchange_strong(expected, nullptr)) {
                    // this was the only item in the queue, and the queue is now empty.
                    reset();
                    return;
                }
                // A successor has already swapped itself into q_tail but has
                // not linked itself through m_next yet; wait for the link.
                spin_wait_while_eq(m_next, nullptr);
            }
            m_next.load(std::memory_order_acquire)->m_going.store(1U, std::memory_order_release);

            reset();
        }

    private:
        //! The pointer to the mutex owned, or nullptr if not holding a mutex.
        queuing_mutex* m_mutex{nullptr};

        //! The pointer to the next competitor for a mutex
        std::atomic<scoped_lock*> m_next{nullptr};

        //! The local spin-wait variable
        /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of
            zero-initialization. Defining it as an entire word instead of
            a byte seems to help performance slightly. */
        std::atomic<uintptr_t> m_going{0U};
    };

    // Mutex traits
    static constexpr bool is_rw_mutex = false;
    static constexpr bool is_recursive_mutex = false;
    static constexpr bool is_fair_mutex = true;

private:
    //! The last competitor requesting the lock
    std::atomic<scoped_lock*> q_tail{nullptr};

};
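
// Usage sketch (illustrative only): a scoped_lock is both the RAII guard and
// this thread's queue node, so the mutex is released automatically when the
// scoped_lock leaves scope. The names below are hypothetical.
//
//     tbb::queuing_mutex counter_mutex;   // hypothetical mutex instance
//     int counter = 0;                    // hypothetical shared state
//
//     void increment() {
//         tbb::queuing_mutex::scoped_lock lock(counter_mutex); // blocks until acquired
//         ++counter;
//     }   // ~scoped_lock() releases and hands the mutex to the next queued thread
//
//     bool try_increment() {
//         tbb::queuing_mutex::scoped_lock lock;
//         if (lock.try_acquire(counter_mutex)) {  // non-blocking attempt
//             ++counter;
//             return true;
//         }
//         return false;                           // mutex was busy; nothing done
//     }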

#if TBB_USE_PROFILING_TOOLS
inline void set_name(queuing_mutex& obj, const char* name) {
    itt_set_sync_name(&obj, name);
}
#if (_WIN32||_WIN64)
inline void set_name(queuing_mutex& obj, const wchar_t* name) {
    itt_set_sync_name(&obj, name);
}
#endif //WIN
#else
inline void set_name(queuing_mutex&, const char*) {}
#if (_WIN32||_WIN64)
inline void set_name(queuing_mutex&, const wchar_t*) {}
#endif //WIN
#endif
} // namespace d1
} // namespace detail

inline namespace v1 {
using detail::d1::queuing_mutex;
} // namespace v1
namespace profiling {
using detail::d1::set_name;
}
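
// Illustrative sketch: attaching a human-readable name to a mutex for
// profiling tools. The name is only propagated when TBB_USE_PROFILING_TOOLS
// is enabled; otherwise set_name is a no-op. The name string is hypothetical.
//
//     tbb::queuing_mutex m;
//     tbb::profiling::set_name(m, "my_queuing_mutex");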
} // namespace tbb

#endif /* __TBB_queuing_mutex_H */