/*
    Copyright (c) 2005-2022 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/
16 
17 #ifndef __TBB_queuing_rw_mutex_H
18 #define __TBB_queuing_rw_mutex_H
19 
20 #include "detail/_config.h"
21 #include "detail/_namespace_injection.h"
22 #include "detail/_assert.h"
23 #include "detail/_mutex_common.h"
24 
25 #include "profiling.h"
26 
27 #include <cstring>
28 #include <atomic>
29 
30 namespace tbb {
31 namespace detail {
32 namespace r1 {
33 struct queuing_rw_mutex_impl;
34 }
35 namespace d1 {
36 
//! Queuing reader-writer mutex with local-only spinning.
/** Adapted from Krieger, Stumm, et al. pseudocode at
    https://www.researchgate.net/publication/221083709_A_Fair_Fast_Scalable_Reader-Writer_Lock
    @ingroup synchronization */
class queuing_rw_mutex {
    // All non-trivial lock/unlock logic lives out-of-line in the TBB binary
    // (namespace r1, see the forwarders at the bottom of this header); it
    // needs direct access to the private fields below.
    friend r1::queuing_rw_mutex_impl;
public:
    //! Construct unacquired mutex.
    queuing_rw_mutex() noexcept  {
        // Register this object with ITT-based profiling tools (no-op build when disabled).
        create_itt_sync(this, "tbb::queuing_rw_mutex", "");
    }

    //! Destructor asserts if the mutex is acquired, i.e. q_tail is non-null
    ~queuing_rw_mutex() {
        __TBB_ASSERT(q_tail.load(std::memory_order_relaxed) == nullptr, "destruction of an acquired mutex");
    }

    //! No Copy
    queuing_rw_mutex(const queuing_rw_mutex&) = delete;
    queuing_rw_mutex& operator=(const queuing_rw_mutex&) = delete;

    //! The scoped locking pattern
    /** It helps to avoid the common problem of forgetting to release lock.
        It also nicely provides the "node" for queuing locks. */
    class scoped_lock {
        friend r1::queuing_rw_mutex_impl;
        //! Initialize fields to mean "no lock held".
        void initialize() {
            my_mutex = nullptr;
            my_internal_lock.store(0, std::memory_order_relaxed);
            my_going.store(0, std::memory_order_relaxed);
#if TBB_USE_ASSERT
            my_state = 0xFF; // Set to invalid state
            // Poison the queue links so a stale/uninitialized node is easy to
            // spot in assertions and debuggers.
            my_next.store(reinterpret_cast<uintptr_t>(reinterpret_cast<void*>(-1)), std::memory_order_relaxed);
            my_prev.store(reinterpret_cast<uintptr_t>(reinterpret_cast<void*>(-1)), std::memory_order_relaxed);
#endif /* TBB_USE_ASSERT */
        }

    public:
        //! Construct lock that has not acquired a mutex.
        /** Equivalent to zero-initialization of *this. */
        scoped_lock() {initialize();}

        //! Acquire lock on given mutex.
        /** write=true (default) requests an exclusive lock, write=false a shared one. */
        scoped_lock( queuing_rw_mutex& m, bool write=true ) {
            initialize();
            acquire(m,write);
        }

        //! Release lock (if lock is held).
        ~scoped_lock() {
            if( my_mutex ) release();
        }

        //! No Copy
        scoped_lock(const scoped_lock&) = delete;
        scoped_lock& operator=(const scoped_lock&) = delete;

        //! Acquire lock on given mutex.
        void acquire( queuing_rw_mutex& m, bool write=true );

        //! Acquire lock on given mutex if free (i.e. non-blocking)
        bool try_acquire( queuing_rw_mutex& m, bool write=true );

        //! Release lock.
        void release();

        //! Upgrade reader to become a writer.
        /** Returns whether the upgrade happened without releasing and re-acquiring the lock */
        bool upgrade_to_writer();

        //! Downgrade writer to become a reader.
        bool downgrade_to_reader();

        //! Whether this lock currently holds the mutex as a writer (forwarded to r1).
        bool is_writer() const;

    private:
        //! The pointer to the mutex owned, or nullptr if not holding a mutex.
        queuing_rw_mutex* my_mutex;

        //! The 'pointer' to the previous and next competitors for a mutex
        /** Stored as uintptr_t rather than scoped_lock*; the r1 implementation
            presumably packs flag bits into the low bits — exact encoding lives
            in the binary (NOTE(review): confirm against r1 sources). */
        std::atomic<uintptr_t> my_prev;
        std::atomic<uintptr_t> my_next;

        using state_t = unsigned char ;

        //! State of the request: reader, writer, active reader, other service states
        std::atomic<state_t> my_state;

        //! The local spin-wait variable
        /** Corresponds to "spin" in the pseudocode but inverted for the sake of zero-initialization */
        std::atomic<unsigned char> my_going;

        //! A tiny internal lock
        std::atomic<unsigned char> my_internal_lock;
    };

    // Mutex traits
    static constexpr bool is_rw_mutex = true;
    static constexpr bool is_recursive_mutex = false;
    static constexpr bool is_fair_mutex = true;

private:
    //! The last competitor requesting the lock
    std::atomic<scoped_lock*> q_tail{nullptr};
};
#if TBB_USE_PROFILING_TOOLS
//! Attach a user-visible name to the mutex for profiling/tracing tools (ITT).
inline void set_name(queuing_rw_mutex& obj, const char* name) {
    itt_set_sync_name(&obj, name);
}
#if (_WIN32||_WIN64)
//! Wide-character overload, available on Windows only.
inline void set_name(queuing_rw_mutex& obj, const wchar_t* name) {
    itt_set_sync_name(&obj, name);
}
#endif //WIN
#else
//! No-op stubs so callers compile unchanged when profiling tools are disabled.
inline void set_name(queuing_rw_mutex&, const char*) {}
#if (_WIN32||_WIN64)
inline void set_name(queuing_rw_mutex&, const wchar_t*) {}
#endif //WIN
#endif
158 } // namespace d1
159 
160 namespace r1 {
161 TBB_EXPORT void acquire(d1::queuing_rw_mutex&, d1::queuing_rw_mutex::scoped_lock&, bool);
162 TBB_EXPORT bool try_acquire(d1::queuing_rw_mutex&, d1::queuing_rw_mutex::scoped_lock&, bool);
163 TBB_EXPORT void release(d1::queuing_rw_mutex::scoped_lock&);
164 TBB_EXPORT bool upgrade_to_writer(d1::queuing_rw_mutex::scoped_lock&);
165 TBB_EXPORT bool downgrade_to_reader(d1::queuing_rw_mutex::scoped_lock&);
166 TBB_EXPORT bool is_writer(const d1::queuing_rw_mutex::scoped_lock&);
167 } // namespace r1
168 
169 namespace d1 {
170 
171 
//! Blocking acquire: forwards to the implementation exported from the TBB binary.
inline void queuing_rw_mutex::scoped_lock::acquire(queuing_rw_mutex& m,bool write) {
    r1::acquire(m, *this, write);
}
175 
//! Non-blocking acquire attempt: forwards to r1::try_acquire.
inline bool queuing_rw_mutex::scoped_lock::try_acquire(queuing_rw_mutex& m, bool write) {
    return r1::try_acquire(m, *this, write);
}
179 
//! Release the held lock: forwards to r1::release.
inline void queuing_rw_mutex::scoped_lock::release() {
    r1::release(*this);
}
183 
//! Upgrade reader to writer: forwards to r1::upgrade_to_writer.
/** Returns whether the upgrade happened without an intermediate release/re-acquire. */
inline bool queuing_rw_mutex::scoped_lock::upgrade_to_writer() {
    return r1::upgrade_to_writer(*this);
}
187 
//! Downgrade writer to reader: forwards to r1::downgrade_to_reader.
inline bool queuing_rw_mutex::scoped_lock::downgrade_to_reader() {
    return r1::downgrade_to_reader(*this);
}
191 
//! Query whether this lock holds the mutex as a writer: forwards to r1::is_writer.
inline bool queuing_rw_mutex::scoped_lock::is_writer() const {
    return r1::is_writer(*this);
}
195 } // namespace d1
196 
197 } // namespace detail
198 
199 inline namespace v1 {
200 using detail::d1::queuing_rw_mutex;
201 } // namespace v1
202 namespace profiling {
203     using detail::d1::set_name;
204 }
205 } // namespace tbb
206 
207 #endif /* __TBB_queuing_rw_mutex_H */
208